after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __getitem__(self, key):
try:
return super(LocalWeakReferencedCache, self).__getitem__(key)
except (TypeError, KeyError):
return None # key is either not weak-referenceable or not cached
|
def __getitem__(self, key):
try:
return super(LocalWeakReferencedCache, self).__getitem__(key)
except TypeError:
return None # key is not weak-referenceable, it's not cached
|
https://github.com/scrapy/scrapy/issues/4597
|
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py", line 42, in process_request
defer.returnValue((yield download_func(request=request, spider=spider)))
File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1362, in returnValue
raise _DefGen_Return(val)
twisted.internet.defer._DefGen_Return: <200 https://www.example.com>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py", line 55, in mustbe_deferred
result = f(*args, **kw)
File "/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py", line 60, in process_spider_input
return scrape_func(response, request, spider)
File "/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py", line 148, in call_spider
warn_on_generator_with_return_value(spider, callback)
File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 202, in warn_on_generator_with_return_value
if is_generator_with_return_value(callable):
File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 180, in is_generator_with_return_value
return _generator_callbacks_cache[callable]
File "/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py", line 281, in __getitem__
return super(LocalWeakReferencedCache, self).__getitem__(key)
File "/usr/local/lib/python3.8/weakref.py", line 383, in __getitem__
return self.data[ref(key)]
KeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)>
|
KeyError
|
def __init__(self, stats):
self.stats = stats
self.start_time = None
|
def __init__(self, stats):
self.stats = stats
|
https://github.com/scrapy/scrapy/issues/4007
|
2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>
Traceback (most recent call last):
File ".../lib/python3.6/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File ".../lib/python3.6/site-packages/pydispatch/robustapply.py", line 55, in robustApply
return receiver(*arguments, **named)
File ".../lib/python3.6/site-packages/scrapy/extensions/corestats.py", line 28, in spider_closed
elapsed_time = finish_time - self.stats.get_value('start_time')
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def spider_opened(self, spider):
self.start_time = datetime.utcnow()
self.stats.set_value("start_time", self.start_time, spider=spider)
|
def spider_opened(self, spider):
self.stats.set_value("start_time", datetime.datetime.utcnow(), spider=spider)
|
https://github.com/scrapy/scrapy/issues/4007
|
2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>
Traceback (most recent call last):
File ".../lib/python3.6/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File ".../lib/python3.6/site-packages/pydispatch/robustapply.py", line 55, in robustApply
return receiver(*arguments, **named)
File ".../lib/python3.6/site-packages/scrapy/extensions/corestats.py", line 28, in spider_closed
elapsed_time = finish_time - self.stats.get_value('start_time')
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def spider_closed(self, spider, reason):
finish_time = datetime.utcnow()
elapsed_time = finish_time - self.start_time
elapsed_time_seconds = elapsed_time.total_seconds()
self.stats.set_value("elapsed_time_seconds", elapsed_time_seconds, spider=spider)
self.stats.set_value("finish_time", finish_time, spider=spider)
self.stats.set_value("finish_reason", reason, spider=spider)
|
def spider_closed(self, spider, reason):
finish_time = datetime.datetime.utcnow()
elapsed_time = finish_time - self.stats.get_value("start_time")
elapsed_time_seconds = elapsed_time.total_seconds()
self.stats.set_value("elapsed_time_seconds", elapsed_time_seconds, spider=spider)
self.stats.set_value("finish_time", finish_time, spider=spider)
self.stats.set_value("finish_reason", reason, spider=spider)
|
https://github.com/scrapy/scrapy/issues/4007
|
2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>
Traceback (most recent call last):
File ".../lib/python3.6/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File ".../lib/python3.6/site-packages/pydispatch/robustapply.py", line 55, in robustApply
return receiver(*arguments, **named)
File ".../lib/python3.6/site-packages/scrapy/extensions/corestats.py", line 28, in spider_closed
elapsed_time = finish_time - self.stats.get_value('start_time')
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def file_path(self, request, response=None, info=None):
media_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
media_ext = os.path.splitext(request.url)[1]
# Handles empty and wild extensions by trying to guess the
# mime type then extension or default to empty string otherwise
if media_ext not in mimetypes.types_map:
media_ext = ""
media_type = mimetypes.guess_type(request.url)[0]
if media_type:
media_ext = mimetypes.guess_extension(media_type)
return "full/%s%s" % (media_guid, media_ext)
|
def file_path(self, request, response=None, info=None):
media_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
media_ext = os.path.splitext(request.url)[1]
return "full/%s%s" % (media_guid, media_ext)
|
https://github.com/scrapy/scrapy/issues/3953
|
Traceback (most recent call last):
File "c:\program files\python37\lib\site-packages\scrapy\pipelines\files.py", line 419, in media_downloaded
checksum = self.file_downloaded(response, request, info)
File "c:\program files\python37\lib\site-packages\scrapy\pipelines\files.py", line 452, in file_downloaded
self.store.persist_file(path, buf, info)
File "c:\program files\python37\lib\site-packages\scrapy\pipelines\files.py", line 53, in persist_file
with open(absolute_path, 'wb') as f:
OSError: [Errno 22] Invalid argument: 'E:\\2019-08-12\\resources\\885443110bae0e1149e017dbea5ca3935efa38c0.com%2Fimages%2Fdimse%2F5845cadfecd996e0372f%2F108a4af73772ae197fa2c4ec4e9fe7a47390433c%2FY3JvcD0xMTc0JTJDNTgwJTJDMCUyQzAmcXVhbGl0eT04NSZmb3JtYXQ9anBnJnJlc2l6ZT0xNjAwJTJDNzkxJmltYWdlX3VyaT1odHRwcyUzQSUyRiUyRnMueWltZy5jb20lMkZvcyUyRmNyZWF0ci11cGxvYWRlZC1pbWFnZXMlMkYyMDE5LTA4JTJGMWJmZGQxNDAtYjliYy0xMWU5LWJmZjMtMjMyNzcwMTg1MzE5JmNsaWVudD1hMWFjYWMzZTFiMzI5MDkxN2Q5MiZzaWduYXR1cmU9OTFiNzQ3Y2MyZTY5ODY3OGIxNWI0OTkyMjdjM2NmZWRlYTE1NGIxOA%3D%3D&client=a1acac3e1b3290917d92&signature=6517aece82e79d536edeaccc275ad88090df0252'
|
OSError
|
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
# Python <= 3.4 raises pickle.PicklingError here while
# 3.5 <= Python < 3.6 raises AttributeError and
# Python >= 3.6 raises TypeError
except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e))
|
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=2)
# Python>=3.5 raises AttributeError here while
# Python<=3.4 raises pickle.PicklingError
except (pickle.PicklingError, AttributeError) as e:
raise ValueError(str(e))
|
https://github.com/scrapy/scrapy/issues/3054
|
root@04bfc6cf84cd:/# scrapy version -v
Scrapy : 1.3.3
lxml : 3.7.2.0
libxml2 : 2.9.3
cssselect : 1.0.1
parsel : 1.1.0
w3lib : 1.17.0
Twisted : 16.6.0
Python : 2.7.14 (default, Dec 12 2017, 16:55:09) - [GCC 4.9.2]
pyOpenSSL : 16.2.0 (OpenSSL 1.0.1t 3 May 2016)
Platform : Linux-4.9.44-linuxkit-aufs-x86_64-with-debian-8.10
root@04bfc6cf84cd:/# scrapy shell "http://example.org"
2017-12-29 16:49:27 [scrapy.utils.log] INFO: Scrapy 1.3.3 started (bot: scrapybot)
(...)
from six.moves import cPickle as pickle
s2 = pickle.loads(pickle.dumps(response.selector, protocol=2))
response.selector.css('a')
[<Selector xpath=u'descendant-or-self::a' data=u'<a href="http://www.iana.org/domains/exa'>]
s2.css('a')
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/local/lib/python2.7/site-packages/parsel/selector.py", line 227, in css
return self.xpath(self._css2xpath(query))
File "/usr/local/lib/python2.7/site-packages/parsel/selector.py", line 203, in xpath
**kwargs)
File "src/lxml/lxml.etree.pyx", line 1584, in lxml.etree._Element.xpath (src/lxml/lxml.etree.c:59349)
File "src/lxml/xpath.pxi", line 257, in lxml.etree.XPathElementEvaluator.__init__ (src/lxml/lxml.etree.c:170478)
File "src/lxml/apihelpers.pxi", line 19, in lxml.etree._assertValidNode (src/lxml/lxml.etree.c:16482)
AssertionError: invalid Element proxy at 140144569743064
|
AssertionError
|
def getHostByName(self, name, timeout=None):
if name in dnscache:
return defer.succeed(dnscache[name])
# in Twisted<=16.6, getHostByName() is always called with
# a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple),
# so the input argument above is simply overridden
# to enforce Scrapy's DNS_TIMEOUT setting's value
timeout = (self.timeout,)
d = super(CachingThreadedResolver, self).getHostByName(name, timeout)
if dnscache.limit:
d.addCallback(self._cache_result, name)
return d
|
def getHostByName(self, name, timeout=None):
if name in dnscache:
return defer.succeed(dnscache[name])
# in Twisted<=16.6, getHostByName() is always called with
# a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple),
# so the input argument above is simply overridden
# to enforce Scrapy's DNS_TIMEOUT setting's value
timeout = (self.timeout,)
d = super(CachingThreadedResolver, self).getHostByName(name, timeout)
d.addCallback(self._cache_result, name)
return d
|
https://github.com/scrapy/scrapy/issues/2811
|
2017-07-03 03:09:12 [twisted] CRITICAL: while looking up www.mydomain.com with <scrapy.resolver.CachingThreadedResolver object at 0x3fd0050>
Traceback (most recent call last):
File "/usr/lib64/python2.7/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/usr/lib64/python2.7/site-packages/scrapy/resolver.py", line 29, in _cache_result
dnscache[name] = result
File "/usr/lib64/python2.7/site-packages/scrapy/utils/datatypes.py", line 305, in __setitem__
self.popitem(last=False)
File "/usr/lib64/python2.7/collections.py", line 159, in popitem
raise KeyError('dictionary is empty')
KeyError: 'dictionary is empty'
2017-07-03 03:09:12 [scrapy.downloadermiddlewares.retry] DEBUG: Gave up retrying <GET //www.mydomain.com/> (failed 3 times): DNS lookup failed: no results for hostname lookup: www.mydomain.com.
Traceback (most recent call last):
File "/usr/bin/scrapy", line 11, in <module>
sys.exit(execute())
File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 149, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 89, in _run_print_help
func(*a, **kw)
File "/usr/lib64/python2.7/site-packages/scrapy/cmdline.py", line 156, in _run_command
cmd.run(args, opts)
File "/usr/lib64/python2.7/site-packages/scrapy/commands/shell.py", line 73, in run
shell.start(url=url, redirect=not opts.no_redirect)
File "/usr/lib64/python2.7/site-packages/scrapy/shell.py", line 48, in start
self.fetch(url, spider, redirect=redirect)
File "/usr/lib64/python2.7/site-packages/scrapy/shell.py", line 115, in fetch
reactor, self._schedule, request, spider)
File "/usr/lib64/python2.7/site-packages/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "<string>", line 2, in raiseException
twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: www.mydomain.com.
|
KeyError
|
def process_response(self, request, response, spider):
if request.method == "HEAD":
return response
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding:
encoding = content_encoding.pop()
decoded_body = self._decode(response.body, encoding.lower())
respcls = responsetypes.from_args(
headers=response.headers, url=response.url, body=decoded_body
)
kwargs = dict(cls=respcls, body=decoded_body)
if issubclass(respcls, TextResponse):
# force recalculating the encoding until we make sure the
# responsetypes guessing is reliable
kwargs["encoding"] = None
response = response.replace(**kwargs)
if not content_encoding:
del response.headers["Content-Encoding"]
return response
|
def process_response(self, request, response, spider):
if request.method == "HEAD":
return response
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding and not is_gzipped(response):
encoding = content_encoding.pop()
decoded_body = self._decode(response.body, encoding.lower())
respcls = responsetypes.from_args(
headers=response.headers, url=response.url, body=decoded_body
)
kwargs = dict(cls=respcls, body=decoded_body)
if issubclass(respcls, TextResponse):
# force recalculating the encoding until we make sure the
# responsetypes guessing is reliable
kwargs["encoding"] = None
response = response.replace(**kwargs)
if not content_encoding:
del response.headers["Content-Encoding"]
return response
|
https://github.com/scrapy/scrapy/issues/2389
|
$ scrapy runspider spider.py
2016-11-09 15:53:10 [scrapy] INFO: Scrapy 1.2.1 started (bot: scrapybot)
(...)
2016-11-09 15:53:10 [scrapy] INFO: Spider opened
2016-11-09 15:53:10 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-11-09 15:53:10 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-11-09 15:53:11 [scrapy] DEBUG: Crawled (200) <GET http://www.newegg.com/Siteindex_USA.xml> (referer: None)
2016-11-09 15:53:11 [scrapy] DEBUG: Crawled (200) <GET http://www.newegg.com/Sitemap/USA/newegg_sitemap_store01.xml.gz> (referer: http://www.newegg.com/Siteindex_USA.xml)
2016-11-09 15:53:11 [scrapy] ERROR: Spider error processing <GET http://www.newegg.com/Sitemap/USA/newegg_sitemap_store01.xml.gz> (referer: http://www.newegg.com/Siteindex_USA.xml)
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spiders/sitemap.py", line 44, in _parse_sitemap
s = Sitemap(body)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/sitemap.py", line 17, in __init__
rt = self._root.tag
AttributeError: 'NoneType' object has no attribute 'tag'
|
AttributeError
|
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif gzip_magic_number(response):
return gunzip(response.body)
# actual gzipped sitemap files are decompressed above ;
# if we are here (response body is not gzipped)
# and have a response for .xml.gz,
# it usually means that it was already gunzipped
# by HttpCompression middleware,
# the HTTP response being sent with "Content-Encoding: gzip"
# without actually being a .xml.gz file in the first place,
# merely XML gzip-compressed on the fly,
# in other word, here, we have plain XML
elif response.url.endswith(".xml") or response.url.endswith(".xml.gz"):
return response.body
|
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif is_gzipped(response):
return gunzip(response.body)
elif response.url.endswith(".xml"):
return response.body
elif response.url.endswith(".xml.gz"):
return gunzip(response.body)
|
https://github.com/scrapy/scrapy/issues/2389
|
$ scrapy runspider spider.py
2016-11-09 15:53:10 [scrapy] INFO: Scrapy 1.2.1 started (bot: scrapybot)
(...)
2016-11-09 15:53:10 [scrapy] INFO: Spider opened
2016-11-09 15:53:10 [scrapy] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2016-11-09 15:53:10 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-11-09 15:53:11 [scrapy] DEBUG: Crawled (200) <GET http://www.newegg.com/Siteindex_USA.xml> (referer: None)
2016-11-09 15:53:11 [scrapy] DEBUG: Crawled (200) <GET http://www.newegg.com/Sitemap/USA/newegg_sitemap_store01.xml.gz> (referer: http://www.newegg.com/Siteindex_USA.xml)
2016-11-09 15:53:11 [scrapy] ERROR: Spider error processing <GET http://www.newegg.com/Sitemap/USA/newegg_sitemap_store01.xml.gz> (referer: http://www.newegg.com/Siteindex_USA.xml)
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spiders/sitemap.py", line 44, in _parse_sitemap
s = Sitemap(body)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/sitemap.py", line 17, in __init__
rt = self._root.tag
AttributeError: 'NoneType' object has no attribute 'tag'
|
AttributeError
|
def process_response(self, request, response, spider):
if request.method == "HEAD":
return response
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding and not is_gzipped(response):
encoding = content_encoding.pop()
decoded_body = self._decode(response.body, encoding.lower())
respcls = responsetypes.from_args(
headers=response.headers, url=response.url, body=decoded_body
)
kwargs = dict(cls=respcls, body=decoded_body)
if issubclass(respcls, TextResponse):
# force recalculating the encoding until we make sure the
# responsetypes guessing is reliable
kwargs["encoding"] = None
response = response.replace(**kwargs)
if not content_encoding:
del response.headers["Content-Encoding"]
return response
|
def process_response(self, request, response, spider):
if request.method == "HEAD":
return response
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding and not is_gzipped(response):
encoding = content_encoding.pop()
decoded_body = self._decode(response.body, encoding.lower())
respcls = responsetypes.from_args(
headers=response.headers, url=response.url
)
kwargs = dict(cls=respcls, body=decoded_body)
if issubclass(respcls, TextResponse):
# force recalculating the encoding until we make sure the
# responsetypes guessing is reliable
kwargs["encoding"] = None
response = response.replace(**kwargs)
if not content_encoding:
del response.headers["Content-Encoding"]
return response
|
https://github.com/scrapy/scrapy/issues/2145
|
Traceback (most recent call last):
File "twisted/internet/defer.py", line 1128, in _inlineCallbacks
result = g.send(result)
File "scrapy/core/downloader/middleware.py", line 53, in process_response
spider=spider)
File "scrapy/downloadermiddlewares/httpcompression.py", line 38, in process_response
response = response.replace(**kwargs)
File "scrapy/http/response/text.py", line 50, in replace
return Response.replace(self, *args, **kwargs)
File "scrapy/http/response/__init__.py", line 77, in replace
return cls(*args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'encoding'
|
TypeError
|
def __init__(self, settings):
self.default_user = settings["FTP_USER"]
self.default_password = settings["FTP_PASSWORD"]
self.passive_mode = settings["FTP_PASSIVE_MODE"]
|
def __init__(self, setting):
pass
|
https://github.com/scrapy/scrapy/issues/2342
|
$ scrapy version -v
Scrapy : 1.2.0
lxml : 3.6.4.0
libxml2 : 2.9.4
Twisted : 16.4.1
Python : 2.7.12 (default, Jul 1 2016, 15:12:24) - [GCC 5.4.0 20160609]
pyOpenSSL : 16.1.0 (OpenSSL 1.0.2g 1 Mar 2016)
Platform : Linux-4.4.0-43-generic-x86_64-with-Ubuntu-16.04-xenial
$ scrapy shell ftp://ftp.eu.metabrainz.org/pub/musicbrainz/data/fullexport/20161019-001816/MD5SUMS
2016-10-20 16:01:27 [scrapy] INFO: Scrapy 1.2.0 started (bot: scrapybot)
(...)
2016-10-20 16:01:27 [scrapy] INFO: Spider opened
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/bin/scrapy", line 11, in <module>
sys.exit(execute())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "<string>", line 2, in raiseException
KeyError: 'ftp_user'
|
KeyError
|
def download_request(self, request, spider):
parsed_url = urlparse_cached(request)
user = request.meta.get("ftp_user", self.default_user)
password = request.meta.get("ftp_password", self.default_password)
passive_mode = 1 if bool(request.meta.get("ftp_passive", self.passive_mode)) else 0
creator = ClientCreator(reactor, FTPClient, user, password, passive=passive_mode)
return creator.connectTCP(parsed_url.hostname, parsed_url.port or 21).addCallback(
self.gotClient, request, unquote(parsed_url.path)
)
|
def download_request(self, request, spider):
parsed_url = urlparse(request.url)
creator = ClientCreator(
reactor,
FTPClient,
request.meta["ftp_user"],
request.meta["ftp_password"],
passive=request.meta.get("ftp_passive", 1),
)
return creator.connectTCP(parsed_url.hostname, parsed_url.port or 21).addCallback(
self.gotClient, request, unquote(parsed_url.path)
)
|
https://github.com/scrapy/scrapy/issues/2342
|
$ scrapy version -v
Scrapy : 1.2.0
lxml : 3.6.4.0
libxml2 : 2.9.4
Twisted : 16.4.1
Python : 2.7.12 (default, Jul 1 2016, 15:12:24) - [GCC 5.4.0 20160609]
pyOpenSSL : 16.1.0 (OpenSSL 1.0.2g 1 Mar 2016)
Platform : Linux-4.4.0-43-generic-x86_64-with-Ubuntu-16.04-xenial
$ scrapy shell ftp://ftp.eu.metabrainz.org/pub/musicbrainz/data/fullexport/20161019-001816/MD5SUMS
2016-10-20 16:01:27 [scrapy] INFO: Scrapy 1.2.0 started (bot: scrapybot)
(...)
2016-10-20 16:01:27 [scrapy] INFO: Spider opened
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/bin/scrapy", line 11, in <module>
sys.exit(execute())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "<string>", line 2, in raiseException
KeyError: 'ftp_user'
|
KeyError
|
def add_options(self, parser):
super(Command, self).add_options(parser)
parser.remove_option("--headers")
|
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--spider", dest="spider", help="use this spider")
|
https://github.com/scrapy/scrapy/issues/2501
|
(py35) wingyiu@mbp101:~$scrapy view http://www.scrapy.org
2017-01-19 22:13:54 [scrapy.utils.log] INFO: Scrapy 1.3.0 started (bot: scrapybot)
2017-01-19 22:13:54 [scrapy.utils.log] INFO: Overridden settings: {}
Traceback (most recent call last):
File "/Users/user/venv/py35/bin/scrapy", line 11, in <module>
sys.exit(execute())
File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/Users/user/venv/py35/lib/python3.5/site-packages/scrapy/commands/fetch.py", line 58, in run
if not opts.no_redirect:
AttributeError: 'Values' object has no attribute 'no_redirect'
|
AttributeError
|
def getHostByName(self, name, timeout=None):
if name in dnscache:
return defer.succeed(dnscache[name])
# in Twisted<=16.6, getHostByName() is always called with
# a default timeout of 60s (actually passed as (1, 3, 11, 45) tuple),
# so the input argument above is simply overridden
# to enforce Scrapy's DNS_TIMEOUT setting's value
timeout = (self.timeout,)
d = super(CachingThreadedResolver, self).getHostByName(name, timeout)
d.addCallback(self._cache_result, name)
return d
|
def getHostByName(self, name, timeout=None):
if name in dnscache:
return defer.succeed(dnscache[name])
if not timeout:
timeout = self.timeout
d = super(CachingThreadedResolver, self).getHostByName(name, timeout)
d.addCallback(self._cache_result, name)
return d
|
https://github.com/scrapy/scrapy/issues/2461
|
$ scrapy shell http://localhost:8081/
2016-12-22 12:52:01 [scrapy.utils.log] INFO: Scrapy 1.2.2 started (bot: scrapybot)
2016-12-22 12:52:01 [scrapy.utils.log] INFO: Overridden settings: {'LOGSTATS_INTERVAL': 0, 'DUPEFILTER_CLASS': 'scrapy.dupefilters.BaseDupeFilter'}
2016-12-22 12:52:01 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.corestats.CoreStats']
2016-12-22 12:52:01 [scrapy.middleware] INFO: Enabled downloader middlewares:
['scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware',
'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware',
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware',
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware',
'scrapy.downloadermiddlewares.retry.RetryMiddleware',
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware',
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware',
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware',
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware',
'scrapy.downloadermiddlewares.stats.DownloaderStats']
2016-12-22 12:52:01 [scrapy.middleware] INFO: Enabled spider middlewares:
['scrapy.spidermiddlewares.httperror.HttpErrorMiddleware',
'scrapy.spidermiddlewares.offsite.OffsiteMiddleware',
'scrapy.spidermiddlewares.referer.RefererMiddleware',
'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware',
'scrapy.spidermiddlewares.depth.DepthMiddleware']
2016-12-22 12:52:01 [scrapy.middleware] INFO: Enabled item pipelines:
[]
2016-12-22 12:52:01 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2016-12-22 12:52:01 [scrapy.core.engine] INFO: Spider opened
Traceback (most recent call last):
File "/Users/rolando/miniconda3/envs/dev/bin/scrapy", line 11, in <module>
load_entry_point('Scrapy', 'console_scripts', 'scrapy')()
File "/Users/rolando/Projects/sh/scrapy/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/Users/rolando/Projects/sh/scrapy/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/Users/rolando/Projects/sh/scrapy/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/Users/rolando/Projects/sh/scrapy/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/Users/rolando/Projects/sh/scrapy/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/Users/rolando/Projects/sh/scrapy/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/Users/rolando/Projects/gh/twisted/src/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "/Users/rolando/Projects/gh/twisted/src/twisted/python/failure.py", line 372, in raiseException
raise self.value.with_traceback(self.tb)
TypeError: 'float' object is not iterable
|
TypeError
|
def process_request_2(self, rp, request, spider):
if rp is not None and not rp.can_fetch(to_native_str(self._useragent), request.url):
logger.debug(
"Forbidden by robots.txt: %(request)s",
{"request": request},
extra={"spider": spider},
)
raise IgnoreRequest()
|
def process_request_2(self, rp, request, spider):
if rp is not None and not rp.can_fetch(self._useragent, request.url):
logger.debug(
"Forbidden by robots.txt: %(request)s",
{"request": request},
extra={"spider": spider},
)
raise IgnoreRequest()
|
https://github.com/scrapy/scrapy/issues/2373
|
2016-11-02 13:13:18 [scrapy] DEBUG: Crawled (200) <GET https://en.wikipedia.org/robots.txt> (referer: None)
2016-11-02 13:13:18 [py.warnings] WARNING: C:\Python27\lib\urllib.py:1303: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
return ''.join(map(quoter, s))
2016-11-02 13:13:18 [scrapy] ERROR: Error downloading <GET http://en.wikipedia.org/robots.txt>: u'\xd8'
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 587, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "C:\Python27\lib\site-packages\scrapy\downloadermiddlewares\robotstxt.py", line 97, in _parse_robots
rp.parse(body.splitlines())
File "C:\Python27\lib\robotparser.py", line 120, in parse
entry.rulelines.append(RuleLine(line[1], False))
File "C:\Python27\lib\robotparser.py", line 174, in __init__
self.path = urllib.quote(path)
File "C:\Python27\lib\urllib.py", line 1303, in quote
return ''.join(map(quoter, s))
KeyError: u'\xd8'
|
KeyError
|
def _parse_robots(self, response, netloc):
rp = robotparser.RobotFileParser(response.url)
body = ""
if hasattr(response, "text"):
body = response.text
else: # last effort try
try:
body = response.body.decode("utf-8")
except UnicodeDecodeError:
# If we found garbage, disregard it:,
# but keep the lookup cached (in self._parsers)
# Running rp.parse() will set rp state from
# 'disallow all' to 'allow any'.
pass
# stdlib's robotparser expects native 'str' ;
# with unicode input, non-ASCII encoded bytes decoding fails in Python2
rp.parse(to_native_str(body).splitlines())
rp_dfd = self._parsers[netloc]
self._parsers[netloc] = rp
rp_dfd.callback(rp)
|
def _parse_robots(self, response, netloc):
rp = robotparser.RobotFileParser(response.url)
body = ""
if hasattr(response, "text"):
body = response.text
else: # last effort try
try:
body = response.body.decode("utf-8")
except UnicodeDecodeError:
# If we found garbage, disregard it:,
# but keep the lookup cached (in self._parsers)
# Running rp.parse() will set rp state from
# 'disallow all' to 'allow any'.
pass
rp.parse(body.splitlines())
rp_dfd = self._parsers[netloc]
self._parsers[netloc] = rp
rp_dfd.callback(rp)
|
https://github.com/scrapy/scrapy/issues/2373
|
2016-11-02 13:13:18 [scrapy] DEBUG: Crawled (200) <GET https://en.wikipedia.org/robots.txt> (referer: None)
2016-11-02 13:13:18 [py.warnings] WARNING: C:\Python27\lib\urllib.py:1303: UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
return ''.join(map(quoter, s))
2016-11-02 13:13:18 [scrapy] ERROR: Error downloading <GET http://en.wikipedia.org/robots.txt>: u'\xd8'
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 587, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "C:\Python27\lib\site-packages\scrapy\downloadermiddlewares\robotstxt.py", line 97, in _parse_robots
rp.parse(body.splitlines())
File "C:\Python27\lib\robotparser.py", line 120, in parse
entry.rulelines.append(RuleLine(line[1], False))
File "C:\Python27\lib\robotparser.py", line 174, in __init__
self.path = urllib.quote(path)
File "C:\Python27\lib\urllib.py", line 1303, in quote
return ''.join(map(quoter, s))
KeyError: u'\xd8'
|
KeyError
|
def _parse_sitemap(self, response):
if response.url.endswith("/robots.txt"):
for url in sitemap_urls_from_robots(response.text, base_url=response.url):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning(
"Ignoring invalid sitemap: %(response)s",
{"response": response},
extra={"spider": self},
)
return
s = Sitemap(body)
if s.type == "sitemapindex":
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == "urlset":
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
|
def _parse_sitemap(self, response):
if response.url.endswith("/robots.txt"):
for url in sitemap_urls_from_robots(response.text):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning(
"Ignoring invalid sitemap: %(response)s",
{"response": response},
extra={"spider": self},
)
return
s = Sitemap(body)
if s.type == "sitemapindex":
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == "urlset":
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
|
https://github.com/scrapy/scrapy/issues/2390
|
$ scrapy runspider spider.py
Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'
2016-11-09 17:46:19 [scrapy] INFO: Scrapy 1.2.1 started (bot: scrapybot)
(...)
2016-11-09 17:46:19 [scrapy] DEBUG: Crawled (200) <GET http://www.asos.com/robots.txt> (referer: None)
2016-11-09 17:46:19 [scrapy] ERROR: Spider error processing <GET http://www.asos.com/robots.txt> (referer: None)
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spiders/sitemap.py", line 36, in _parse_sitemap
yield Request(url, callback=self._parse_sitemap)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/http/request/__init__.py", line 25, in __init__
self._set_url(url)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/http/request/__init__.py", line 57, in _set_url
raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: /sitemap.ashx
2016-11-09 17:46:19 [scrapy] INFO: Closing spider (finished)
2016-11-09 17:46:19 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 291,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 1857,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 11, 9, 16, 46, 19, 332383),
'log_count/DEBUG': 2,
'log_count/ERROR': 1,
'log_count/INFO': 7,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'spider_exceptions/ValueError': 1,
'start_time': datetime.datetime(2016, 11, 9, 16, 46, 19, 71714)}
2016-11-09 17:46:19 [scrapy] INFO: Spider closed (finished)
|
ValueError
|
def sitemap_urls_from_robots(robots_text, base_url=None):
"""Return an iterator over all sitemap urls contained in the given
robots.txt file
"""
for line in robots_text.splitlines():
if line.lstrip().lower().startswith("sitemap:"):
url = line.split(":", 1)[1].strip()
yield urljoin(base_url, url)
|
def sitemap_urls_from_robots(robots_text):
"""Return an iterator over all sitemap urls contained in the given
robots.txt file
"""
for line in robots_text.splitlines():
if line.lstrip().lower().startswith("sitemap:"):
yield line.split(":", 1)[1].strip()
|
https://github.com/scrapy/scrapy/issues/2390
|
$ scrapy runspider spider.py
Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'
2016-11-09 17:46:19 [scrapy] INFO: Scrapy 1.2.1 started (bot: scrapybot)
(...)
2016-11-09 17:46:19 [scrapy] DEBUG: Crawled (200) <GET http://www.asos.com/robots.txt> (referer: None)
2016-11-09 17:46:19 [scrapy] ERROR: Spider error processing <GET http://www.asos.com/robots.txt> (referer: None)
Traceback (most recent call last):
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/utils/defer.py", line 102, in iter_errback
yield next(it)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/referer.py", line 22, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/spiders/sitemap.py", line 36, in _parse_sitemap
yield Request(url, callback=self._parse_sitemap)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/http/request/__init__.py", line 25, in __init__
self._set_url(url)
File "/home/paul/.virtualenvs/scrapy12/local/lib/python2.7/site-packages/scrapy/http/request/__init__.py", line 57, in _set_url
raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: /sitemap.ashx
2016-11-09 17:46:19 [scrapy] INFO: Closing spider (finished)
2016-11-09 17:46:19 [scrapy] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 291,
'downloader/request_count': 1,
'downloader/request_method_count/GET': 1,
'downloader/response_bytes': 1857,
'downloader/response_count': 1,
'downloader/response_status_count/200': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2016, 11, 9, 16, 46, 19, 332383),
'log_count/DEBUG': 2,
'log_count/ERROR': 1,
'log_count/INFO': 7,
'response_received_count': 1,
'scheduler/dequeued': 1,
'scheduler/dequeued/memory': 1,
'scheduler/enqueued': 1,
'scheduler/enqueued/memory': 1,
'spider_exceptions/ValueError': 1,
'start_time': datetime.datetime(2016, 11, 9, 16, 46, 19, 71714)}
2016-11-09 17:46:19 [scrapy] INFO: Spider closed (finished)
|
ValueError
|
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
if self.heartbeat.running:
self.heartbeat.stop()
self.closing.callback(None)
|
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
self.heartbeat.stop()
self.closing.callback(None)
|
https://github.com/scrapy/scrapy/issues/2362
|
2016-10-26 16:34:15 [scrapy] INFO: Closing spider (shutdown)
Unhandled error in Deferred:
2016-10-26 16:34:15 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\scrapy\commands\crawl.py", line 57, in run
self.crawler_process.crawl(spname, **opts.spargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 163, in crawl
return self._crawl(crawler, *args, **kwargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 167, in _crawl
d = crawler.crawl(*args, **kwargs)
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1331, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
exceptions.AssertionError: Tried to stop a LoopingCall that was not running.
2016-10-26 16:34:15 [twisted] CRITICAL:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
AssertionError: Tried to stop a LoopingCall that was not running.
|
exceptions.AssertionError
|
def __init__(self, stats, interval=60.0):
self.stats = stats
self.interval = interval
self.multiplier = 60.0 / self.interval
self.task = None
|
def __init__(self, stats, interval=60.0):
self.stats = stats
self.interval = interval
self.multiplier = 60.0 / self.interval
|
https://github.com/scrapy/scrapy/issues/2362
|
2016-10-26 16:34:15 [scrapy] INFO: Closing spider (shutdown)
Unhandled error in Deferred:
2016-10-26 16:34:15 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\scrapy\commands\crawl.py", line 57, in run
self.crawler_process.crawl(spname, **opts.spargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 163, in crawl
return self._crawl(crawler, *args, **kwargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 167, in _crawl
d = crawler.crawl(*args, **kwargs)
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1331, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
exceptions.AssertionError: Tried to stop a LoopingCall that was not running.
2016-10-26 16:34:15 [twisted] CRITICAL:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
AssertionError: Tried to stop a LoopingCall that was not running.
|
exceptions.AssertionError
|
def spider_closed(self, spider, reason):
if self.task and self.task.running:
self.task.stop()
|
def spider_closed(self, spider, reason):
if self.task.running:
self.task.stop()
|
https://github.com/scrapy/scrapy/issues/2362
|
2016-10-26 16:34:15 [scrapy] INFO: Closing spider (shutdown)
Unhandled error in Deferred:
2016-10-26 16:34:15 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\scrapy\commands\crawl.py", line 57, in run
self.crawler_process.crawl(spname, **opts.spargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 163, in crawl
return self._crawl(crawler, *args, **kwargs)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 167, in _crawl
d = crawler.crawl(*args, **kwargs)
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1331, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
exceptions.AssertionError: Tried to stop a LoopingCall that was not running.
2016-10-26 16:34:15 [twisted] CRITICAL:
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\twisted\internet\defer.py", line 1183, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "C:\Python27\lib\site-packages\twisted\python\failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "C:\Python27\lib\site-packages\scrapy\crawler.py", line 87, in crawl
yield self.engine.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 100, in close
return self._close_all_spiders()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 340, in _close_all_spiders
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 298, in close_spider
dfd = slot.close()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 44, in close
self._maybe_fire_closing()
File "C:\Python27\lib\site-packages\scrapy\core\engine.py", line 51, in _maybe_fire_closing
self.heartbeat.stop()
File "C:\Python27\lib\site-packages\twisted\internet\task.py", line 202, in stop
assert self.running, ("Tried to stop a LoopingCall that was "
AssertionError: Tried to stop a LoopingCall that was not running.
|
exceptions.AssertionError
|
def getCertificateOptions(self):
# setting verify=True will require you to provide CAs
# to verify against; in other words: it's not that simple
# backward-compatible SSL/TLS method:
#
# * this will respect `method` attribute in often recommended
# `ScrapyClientContextFactory` subclass
# (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
#
# * getattr() for `_ssl_method` attribute for context factories
# not calling super(..., self).__init__
return CertificateOptions(
verify=False,
method=getattr(self, "method", getattr(self, "_ssl_method", None)),
fixBrokenPeers=True,
acceptableCiphers=DEFAULT_CIPHERS,
)
|
def getCertificateOptions(self):
# setting verify=True will require you to provide CAs
# to verify against; in other words: it's not that simple
# backward-compatible SSL/TLS method:
#
# * this will respect `method` attribute in often recommended
# `ScrapyClientContextFactory` subclass
# (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
#
# * getattr() for `_ssl_method` attribute for context factories
# not calling super(..., self).__init__
return CertificateOptions(
verify=False, method=getattr(self, "method", getattr(self, "_ssl_method", None))
)
|
https://github.com/scrapy/scrapy/issues/2311
|
2016-10-06 22:15:40 [scrapy] DEBUG: Telnet console listening on 127.0.0.1:6043
2016-10-06 22:15:40 [scrapy] INFO: Spider opened
2016-10-06 22:15:40 [scrapy] DEBUG: Retrying <GET https://subscribe.wsj.com/printpack/> (failed 1 times): [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', 'SSL23_GET_SERVER_HELLO', 'sslv3 alert handshake failure')]>]
2016-10-06 22:15:40 [scrapy] DEBUG: Retrying <GET https://subscribe.wsj.com/printpack/> (failed 2 times): [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', 'SSL23_GET_SERVER_HELLO', 'sslv3 alert handshake failure')]>]
2016-10-06 22:15:40 [scrapy] DEBUG: Gave up retrying <GET https://subscribe.wsj.com/printpack/> (failed 3 times): [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', 'SSL23_GET_SERVER_HELLO', 'sslv3 alert handshake failure')]>]
Traceback (most recent call last):
File "/usr/local/bin/scrapy", line 11, in <module>
sys.exit(execute())
File "/usr/local/lib/python2.7/dist-packages/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/usr/local/lib/python2.7/dist-packages/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/usr/local/lib/python2.7/dist-packages/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/usr/local/lib/python2.7/dist-packages/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/usr/local/lib/python2.7/dist-packages/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/usr/local/lib/python2.7/dist-packages/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "<string>", line 2, in raiseException
twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure OpenSSL.SSL.Error: [('SSL routines', 'SSL23_GET_SERVER_HELLO', 'sslv3 alert handshake failure')]>]
|
OpenSSL.SSL.Error
|
def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL_CB_HANDSHAKE_START:
_maybeSetHostNameIndication(connection, self._hostnameBytes)
elif where & SSL_CB_HANDSHAKE_DONE:
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError as e:
logger.warning(
'Remote certificate is not valid for hostname "{}"; {}'.format(
self._hostnameASCII, e
)
)
except ValueError as e:
logger.warning(
"Ignoring error while verifying certificate "
'from host "{}" (exception: {})'.format(self._hostnameASCII, repr(e))
)
|
def _identityVerifyingInfoCallback(self, connection, where, ret):
if where & SSL_CB_HANDSHAKE_START:
_maybeSetHostNameIndication(connection, self._hostnameBytes)
elif where & SSL_CB_HANDSHAKE_DONE:
try:
verifyHostname(connection, self._hostnameASCII)
except VerificationError as e:
logger.warning(
'Remote certificate is not valid for hostname "{}"; {}'.format(
self._hostnameASCII, e
)
)
|
https://github.com/scrapy/scrapy/issues/2092
|
2016-07-05 15:50:17 [twisted] CRITICAL: Error during info_callback
Traceback (most recent call last):
File "c:\python27\lib\site-packages\twisted\protocols\tls.py", line 421, in dataReceived
self._write(bytes)
File "c:\python27\lib\site-packages\twisted\protocols\tls.py", line 569, in _write
sent = self._tlsConnection.send(toSend)
File "c:\python27\lib\site-packages\OpenSSL\SSL.py", line 1270, in send
result = _lib.SSL_write(self._ssl, buf, len(buf))
File "c:\python27\lib\site-packages\OpenSSL\SSL.py", line 933, in wrapper
callback(Connection._reverse_mapping[ssl], where, return_code)
--- <exception caught here> ---
File "c:\python27\lib\site-packages\twisted\internet\_sslverify.py", line 1154, in infoCallback
return wrapped(connection, where, ret)
File "c:\python27\lib\site-packages\scrapy\core\downloader\tls.py", line 45, in _identityVerifyingInfoCallback
verifyHostname(connection, self._hostnameASCII)
File "c:\python27\lib\site-packages\service_identity\pyopenssl.py", line 45, in verify_hostname
obligatory_ids=[DNS_ID(hostname)],
File "c:\python27\lib\site-packages\service_identity\_common.py", line 245, in __init__
raise ValueError("Invalid DNS-ID.")
exceptions.ValueError: Invalid DNS-ID.
|
exceptions.ValueError
|
def _safe_ParseResult(parts, encoding="utf8", path_encoding="utf8"):
# IDNA encoding can fail for too long labels (>63 characters)
# or missing labels (e.g. http://.example.com)
try:
netloc = parts.netloc.encode("idna")
except UnicodeError:
netloc = parts.netloc
return (
to_native_str(parts.scheme),
to_native_str(netloc),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars),
)
|
def _safe_ParseResult(parts, encoding="utf8", path_encoding="utf8"):
return (
to_native_str(parts.scheme),
to_native_str(parts.netloc.encode("idna")),
# default encoding for path component SHOULD be UTF-8
quote(to_bytes(parts.path, path_encoding), _safe_chars),
quote(to_bytes(parts.params, path_encoding), _safe_chars),
# encoding of query and fragment follows page encoding
# or form-charset (if known and passed)
quote(to_bytes(parts.query, encoding), _safe_chars),
quote(to_bytes(parts.fragment, encoding), _safe_chars),
)
|
https://github.com/scrapy/scrapy/issues/2010
|
2016-05-25 12:13:55,432 [root] [ERROR] Error on http://detroit.curbed.com/2016/5/5/11605132/tiny-house-designer-show, traceback: Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 1203, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 825, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 393, in callback
self._startRunCallbacks(result)
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 501, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 588, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 223, in parse
items.extend(self._extract_requests(response))
File "/var/www/html/DomainCrawler/DomainCrawler/spiders/hybrid_spider.py", line 477, in _extract_requests
links = self.link_extractor.extract_links(response)
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/lxmlhtml.py", line 111, in extract_links
all_links.extend(self._process_links(links))
File "/usr/local/lib/python2.7/site-packages/scrapy/linkextractors/__init__.py", line 103, in _process_links
link.url = canonicalize_url(urlparse(link.url))
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 85, in canonicalize_url
parse_url(url), encoding=encoding)
File "/usr/local/lib/python2.7/site-packages/scrapy/utils/url.py", line 46, in _safe_ParseResult
to_native_str(parts.netloc.encode('idna')),
File "/usr/local/lib/python2.7/encodings/idna.py", line 164, in encode
result.append(ToASCII(label))
File "/usr/local/lib/python2.7/encodings/idna.py", line 73, in ToASCII
raise UnicodeError("label empty or too long")
exceptions.UnicodeError: label empty or too long
|
exceptions.UnicodeError
|
def from_content_disposition(self, content_disposition):
try:
filename = (
to_native_str(content_disposition, encoding="latin-1", errors="replace")
.split(";")[1]
.split("=")[1]
)
filename = filename.strip("\"'")
return self.from_filename(filename)
except IndexError:
return Response
|
def from_content_disposition(self, content_disposition):
try:
filename = to_native_str(content_disposition).split(";")[1].split("=")[1]
filename = filename.strip("\"'")
return self.from_filename(filename)
except IndexError:
return Response
|
https://github.com/scrapy/scrapy/issues/1782
|
Traceback (most recent call last):
File "/Users/kmike/envs/dl/bin/scrapy", line 9, in <module>
load_entry_point('Scrapy', 'console_scripts', 'scrapy')()
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/Users/kmike/svn/scrapy/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py", line 368, in raiseException
raise self.value.with_traceback(self.tb)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte
|
UnicodeDecodeError
|
def get_func_args(func, stripself=False):
"""Return the argument name list of a callable"""
if inspect.isfunction(func):
func_args, _, _, _ = inspect.getargspec(func)
elif inspect.isclass(func):
return get_func_args(func.__init__, True)
elif inspect.ismethod(func):
return get_func_args(func.__func__, True)
elif inspect.ismethoddescriptor(func):
return []
elif isinstance(func, partial):
return [
x
for x in get_func_args(func.func)[len(func.args) :]
if not (func.keywords and x in func.keywords)
]
elif hasattr(func, "__call__"):
if inspect.isroutine(func):
return []
elif getattr(func, "__name__", None) == "__call__":
return []
else:
return get_func_args(func.__call__, True)
else:
raise TypeError("%s is not callable" % type(func))
if stripself:
func_args.pop(0)
return func_args
|
def get_func_args(func, stripself=False):
"""Return the argument name list of a callable"""
if inspect.isfunction(func):
func_args, _, _, _ = inspect.getargspec(func)
elif inspect.isclass(func):
return get_func_args(func.__init__, True)
elif inspect.ismethod(func):
return get_func_args(func.__func__, True)
elif inspect.ismethoddescriptor(func):
return []
elif isinstance(func, partial):
return [
x
for x in get_func_args(func.func)[len(func.args) :]
if not (func.keywords and x in func.keywords)
]
elif hasattr(func, "__call__"):
if inspect.isroutine(func):
return []
else:
return get_func_args(func.__call__, True)
else:
raise TypeError("%s is not callable" % type(func))
if stripself:
func_args.pop(0)
return func_args
|
https://github.com/scrapy/scrapy/issues/728
|
inspect.getmembers(itemgetter(2))
[('__call__',
<method-wrapper '__call__' of operator.itemgetter object at 0x7f79aeffb990>),
('__class__', <type 'operator.itemgetter'>),
('__delattr__',
<method-wrapper '__delattr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__doc__',
'itemgetter(item, ...) --> itemgetter object\n\nReturn a callable object that fetches the given item(s) from its operand.\nAfter, f=itemgetter(2), the call f(r) returns r[2].\nAfter, g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])'),
('__format__',
<built-in method __format__ of operator.itemgetter object at 0x7f79aeffb990>),
('__getattribute__',
<method-wrapper '__getattribute__' of operator.itemgetter object at 0x7f79aeffb990>),
('__hash__',
<method-wrapper '__hash__' of operator.itemgetter object at 0x7f79aeffb990>),
('__init__',
<method-wrapper '__init__' of operator.itemgetter object at 0x7f79aeffb990>),
('__new__', <built-in method __new__ of type object at 0x8c1ec0>),
('__reduce__',
<built-in method __reduce__ of operator.itemgetter object at 0x7f79aeffb990>),
('__reduce_ex__',
<built-in method __reduce_ex__ of operator.itemgetter object at 0x7f79aeffb990>),
('__repr__',
<method-wrapper '__repr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__setattr__',
<method-wrapper '__setattr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__sizeof__',
<built-in method __sizeof__ of operator.itemgetter object at 0x7f79aeffb990>),
('__str__',
<method-wrapper '__str__' of operator.itemgetter object at 0x7f79aeffb990>),
('__subclasshook__',
<built-in method __subclasshook__ of type object at 0x8c1ec0>)]
inspect.getargspec(itemgetter(2).__call__)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/lib/python2.7/inspect.py", line 815, in getargspec
raise TypeError('{!r} is not a Python function'.format(func))
TypeError: <method-wrapper '__call__' of operator.itemgetter object at 0xb3ddd0> is not a Python function
inspect.getargspec(itemgetter(slice(None, 2)).__init__)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/lib/python2.7/inspect.py", line 815, in getargspec
raise TypeError('{!r} is not a Python function'.format(func))
TypeError: <method-wrapper '__init__' of operator.itemgetter object at 0xb3de10> is not a Python function
|
TypeError
|
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
|
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
self.links = []
|
https://github.com/scrapy/scrapy/issues/763
|
$ trial scrapy.tests.test_crawl.CrawlTestCase
scrapy.tests.test_crawl
CrawlTestCase
test_delay ... [OK]
test_engine_status ... [OK]
test_follow_all ... [FAIL]
test_referer_header ... [OK]
test_retry_503 ... [OK]
test_retry_conn_aborted ... [OK]
test_retry_conn_failed ... [OK]
test_retry_conn_lost ... [OK]
test_retry_dns_error ... [OK]
test_start_requests_bug_before_yield ... [OK]
test_start_requests_bug_yielding ... [OK]
test_start_requests_dupes ... [OK]
test_start_requests_lazyness ... [OK]
test_timeout_failure ... [OK]
test_timeout_success ... [OK]
test_unbounded_response ... [OK]
===============================================================================
[FAIL]
Traceback (most recent call last):
File "/home/daniel/envs/scrapy/lib/python2.7/site-packages/twisted/internet/defer.py", line 1099, in _inlineCallbacks
result = g.send(result)
File "/home/daniel/src/scrapy/scrapy/tests/test_crawl.py", line 26, in test_follow_all
self.assertEqual(len(spider.urls_visited), 11) # 10 + start_url
File "/home/daniel/envs/scrapy/lib/python2.7/site-packages/twisted/trial/_synctest.py", line 356, in assertEqual
% (msg, pformat(first), pformat(second)))
twisted.trial.unittest.FailTest: not equal:
a = 31
b = 11
scrapy.tests.test_crawl.CrawlTestCase.test_follow_all
-------------------------------------------------------------------------------
Ran 16 tests in 90.150s
FAILED (failures=1, successes=15)
$ trial scrapy.tests.test_crawl.CrawlTestCase.test_follow_all
scrapy.tests.test_crawl
CrawlTestCase
test_follow_all ... [OK]
-------------------------------------------------------------------------------
Ran 1 tests in 0.874s
PASSED (successes=1)
|
test_retry_dns_error
|
def _extract_links(self, selector, response_url, response_encoding, base_url):
links = []
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector._root):
if self.scan_tag(el.tag) and self.scan_attr(attr):
# pseudo _root.make_links_absolute(base_url)
attr_val = urljoin(base_url, attr_val)
url = self.process_attr(attr_val)
if url is None:
continue
if isinstance(url, unicode):
url = url.encode(response_encoding)
# to fix relative links after process_value
url = urljoin(response_url, url)
link = Link(
url,
_collect_string_content(el) or "",
nofollow=True if el.get("rel") == "nofollow" else False,
)
links.append(link)
return unique_list(links, key=lambda link: link.url) if self.unique else links
|
def _extract_links(self, selector, response_url, response_encoding, base_url):
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector._root):
if self.scan_tag(el.tag) and self.scan_attr(attr):
# pseudo _root.make_links_absolute(base_url)
attr_val = urljoin(base_url, attr_val)
url = self.process_attr(attr_val)
if url is None:
continue
if isinstance(url, unicode):
url = url.encode(response_encoding)
# to fix relative links after process_value
url = urljoin(response_url, url)
link = Link(
url,
_collect_string_content(el) or "",
nofollow=True if el.get("rel") == "nofollow" else False,
)
self.links.append(link)
links = (
unique_list(self.links, key=lambda link: link.url)
if self.unique
else self.links
)
return links
|
https://github.com/scrapy/scrapy/issues/763
|
$ trial scrapy.tests.test_crawl.CrawlTestCase
scrapy.tests.test_crawl
CrawlTestCase
test_delay ... [OK]
test_engine_status ... [OK]
test_follow_all ... [FAIL]
test_referer_header ... [OK]
test_retry_503 ... [OK]
test_retry_conn_aborted ... [OK]
test_retry_conn_failed ... [OK]
test_retry_conn_lost ... [OK]
test_retry_dns_error ... [OK]
test_start_requests_bug_before_yield ... [OK]
test_start_requests_bug_yielding ... [OK]
test_start_requests_dupes ... [OK]
test_start_requests_lazyness ... [OK]
test_timeout_failure ... [OK]
test_timeout_success ... [OK]
test_unbounded_response ... [OK]
===============================================================================
[FAIL]
Traceback (most recent call last):
File "/home/daniel/envs/scrapy/lib/python2.7/site-packages/twisted/internet/defer.py", line 1099, in _inlineCallbacks
result = g.send(result)
File "/home/daniel/src/scrapy/scrapy/tests/test_crawl.py", line 26, in test_follow_all
self.assertEqual(len(spider.urls_visited), 11) # 10 + start_url
File "/home/daniel/envs/scrapy/lib/python2.7/site-packages/twisted/trial/_synctest.py", line 356, in assertEqual
% (msg, pformat(first), pformat(second)))
twisted.trial.unittest.FailTest: not equal:
a = 31
b = 11
scrapy.tests.test_crawl.CrawlTestCase.test_follow_all
-------------------------------------------------------------------------------
Ran 16 tests in 90.150s
FAILED (failures=1, successes=15)
$ trial scrapy.tests.test_crawl.CrawlTestCase.test_follow_all
scrapy.tests.test_crawl
CrawlTestCase
test_follow_all ... [OK]
-------------------------------------------------------------------------------
Ran 1 tests in 0.874s
PASSED (successes=1)
|
test_retry_dns_error
|
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not self.settings.log_debug:
return
self.log(
self.format_response(">>>", request_id), params, self.settings.log_payloads
)
|
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not self.settings.log_debug:
return
self.log(
self.format_response(Direction.Outgoing, request_id),
params,
self.settings.log_payloads,
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def outgoing_request(
self, request_id: int, method: str, params: Any, blocking: bool
) -> None:
if not self.settings.log_debug:
return
direction = "==>" if blocking else "-->"
self.log(
self.format_request(direction, method, request_id),
params,
self.settings.log_payloads,
)
|
def outgoing_request(
self, request_id: int, method: str, params: Any, blocking: bool
) -> None:
if not self.settings.log_debug:
return
direction = Direction.OutgoingBlocking if blocking else Direction.Outgoing
self.log(
self.format_request(direction, method, request_id),
params,
self.settings.log_payloads,
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def outgoing_notification(self, method: str, params: Any) -> None:
if not self.settings.log_debug:
return
# Do not log the payloads if any of these conditions occur because the payloads might contain the entire
# content of the view.
log_payload = (
self.settings.log_payloads
and method != "textDocument/didChange"
and method != "textDocument/didOpen"
)
if (
log_payload
and method == "textDocument/didSave"
and isinstance(params, dict)
and "text" in params
):
log_payload = False
self.log(self.format_notification(" ->", method), params, log_payload)
|
def outgoing_notification(self, method: str, params: Any) -> None:
if not self.settings.log_debug:
return
# Do not log the payloads if any of these conditions occur because the payloads might contain the entire
# content of the view.
log_payload = (
self.settings.log_payloads
and method != "textDocument/didChange"
and method != "textDocument/didOpen"
)
if (
log_payload
and method == "textDocument/didSave"
and isinstance(params, dict)
and "text" in params
):
log_payload = False
self.log(self.format_notification(Direction.Outgoing, method), params, log_payload)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def incoming_response(self, request_id: int, params: Any) -> None:
if not self.settings.log_debug:
return
self.log(
self.format_response("<<<", request_id), params, self.settings.log_payloads
)
|
def incoming_response(self, request_id: int, params: Any) -> None:
if not self.settings.log_debug:
return
self.log(
self.format_response(Direction.Incoming, request_id),
params,
self.settings.log_payloads,
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def incoming_request(
self, request_id: Any, method: str, params: Any, unhandled: bool
) -> None:
if not self.settings.log_debug:
return
direction = "<??" if unhandled else "<--"
self.log(
self.format_request(direction, method, request_id),
params,
self.settings.log_payloads,
)
|
def incoming_request(
self, request_id: Any, method: str, params: Any, unhandled: bool
) -> None:
if not self.settings.log_debug:
return
direction = "unhandled" if unhandled else Direction.Incoming
self.log(
self.format_request(direction, method, request_id),
params,
self.settings.log_payloads,
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not self.settings.log_debug or method == "window/logMessage":
return
direction = "<? " if unhandled else "<- "
self.log(
self.format_notification(direction, method), params, self.settings.log_payloads
)
|
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not self.settings.log_debug or method == "window/logMessage":
return
direction = "unhandled" if unhandled else Direction.Incoming
self.log(
self.format_notification(direction, method), params, self.settings.log_payloads
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def request_or_notification_handler(self, payload: Mapping[str, Any]) -> None:
method = payload["method"] # type: str
params = payload.get("params")
# Server request IDs can be either a string or an int.
request_id = payload.get("id")
if request_id is not None:
self.handle(
request_id,
method,
params,
"request",
self._request_handlers,
lambda *args: self.logger.incoming_request(request_id, *args),
)
else:
self.handle(
None,
method,
params,
"notification",
self._notification_handlers,
self.logger.incoming_notification,
)
|
def request_or_notification_handler(self, payload: Mapping[str, Any]) -> None:
method = payload["method"] # type: str
params = payload.get("params")
# Server request IDs can be either a string or an int.
request_id = payload.get("id")
if request_id is not None:
self.handle(
request_id,
method,
params,
"request",
self._request_handlers,
lambda a, b, c: self.logger.incoming_request(request_id, a, b, c),
)
else:
self.handle(
None,
method,
params,
"notification",
self._notification_handlers,
self.logger.incoming_notification,
)
|
https://github.com/sublimelsp/LSP/issues/905
|
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 201, in receive_payload
self.request_or_notification_handler(payload)
File "C:\Users\Janos\AppData\Roaming\Sublime Text 3\Installed Packages\LSP.sublime-package\plugin/core/rpc.py", line 250, in request_or_notification_handler
request_id_int = int(request_id)
ValueError: invalid literal for int() with base 10: '32c845a7-9a86-4bf4-9d12-b4813fcfbca5'
|
ValueError
|
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.reflist = [] # type: List[List[str]]
self.word_region = None # type: Optional[sublime.Region]
self.word = ""
self.base_dir = None # type: Optional[str]
|
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
self.reflist = [] # type: List[List[str]]
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def run(self, edit: sublime.Edit, event: "Optional[dict]" = None) -> None:
client = self.client_with_capability("referencesProvider")
if client:
pos = get_position(self.view, event)
window = self.view.window()
self.word_region = self.view.word(pos)
self.word = self.view.substr(self.word_region)
# use relative paths if file on the same root.
base_dir = windows.lookup(window).get_project_path()
if base_dir:
if os.path.commonprefix([base_dir, self.view.file_name()]):
self.base_dir = base_dir
document_position = get_document_position(self.view, pos)
if document_position:
document_position["context"] = {"includeDeclaration": False}
request = Request.references(document_position)
client.send_request(
request, lambda response: self.handle_response(response, pos)
)
|
def run(self, edit: sublime.Edit, event: "Optional[dict]" = None) -> None:
client = self.client_with_capability("referencesProvider")
if client:
pos = get_position(self.view, event)
document_position = get_document_position(self.view, pos)
if document_position:
document_position["context"] = {"includeDeclaration": False}
request = Request.references(document_position)
client.send_request(
request, lambda response: self.handle_response(response, pos)
)
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def handle_response(self, response: "Optional[List[ReferenceDict]]", pos: int) -> None:
window = self.view.window()
if response is None:
response = []
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
references_by_file = self._group_references_by_file(response)
if settings.show_references_in_quick_panel:
self.show_quick_panel(references_by_file)
else:
self.show_references_panel(references_by_file)
|
def handle_response(self, response: "Optional[List[ReferenceDict]]", pos: int) -> None:
window = self.view.window()
if response is None:
response = []
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
word_region = self.view.word(pos)
word = self.view.substr(word_region)
base_dir = windows.lookup(window).get_project_path()
formatted_references = self._get_formatted_references(response, base_dir)
if settings.show_references_in_quick_panel:
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
if settings.quick_panel_monospace_font:
flags |= sublime.MONOSPACE_FONT
window.show_quick_panel(
self.reflist,
lambda index: self.on_ref_choice(base_dir, index),
flags,
self.get_current_ref(base_dir, word_region.begin()),
lambda index: self.on_ref_highlight(base_dir, index),
)
else:
panel = ensure_references_panel(window)
if not panel:
return
panel.settings().set("result_base_dir", base_dir)
panel.set_read_only(False)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command(
"append",
{
"characters": "{} references for '{}'\n\n{}".format(
references_count, word, formatted_references
),
"force": True,
"scroll_to_end": False,
},
)
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(word))
panel.add_regions(
"ReferenceHighlight", regions, "comment", flags=sublime.DRAW_OUTLINED
)
panel.set_read_only(True)
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def on_ref_choice(self, index: int) -> None:
self.open_ref_index(index)
|
def on_ref_choice(self, base_dir: "Optional[str]", index: int) -> None:
window = self.view.window()
if index != -1:
window.open_file(
self.get_selected_file_path(base_dir, index), sublime.ENCODED_POSITION
)
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def on_ref_highlight(self, index: int) -> None:
self.open_ref_index(index, transient=True)
|
def on_ref_highlight(self, base_dir: "Optional[str]", index: int) -> None:
window = self.view.window()
if index != -1:
window.open_file(
self.get_selected_file_path(base_dir, index),
sublime.ENCODED_POSITION | sublime.TRANSIENT,
)
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def get_selected_file_path(self, index: int) -> str:
return self.get_full_path(self.reflist[index][0])
|
def get_selected_file_path(self, base_dir: "Optional[str]", index: int) -> str:
file_path = self.reflist[index][0]
if base_dir:
file_path = os.path.join(base_dir, file_path)
return file_path
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def _group_references_by_file(
self, references: "List[ReferenceDict]"
) -> "Dict[str, List[Tuple[Point, str]]]":
"""Return a dictionary that groups references by the file it belongs."""
grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]
for reference in references:
file_path = uri_to_filename(reference["uri"])
point = Point.from_lsp(reference["range"]["start"])
# get line of the reference, to showcase its use
reference_line = linecache.getline(file_path, point.row + 1).strip()
if grouped_references.get(file_path) is None:
grouped_references[file_path] = []
grouped_references[file_path].append((point, reference_line))
# we don't want to cache the line, we always want to get fresh data
linecache.clearcache()
return grouped_references
|
def _group_references_by_file(
self, references: "List[ReferenceDict]", base_dir: "Optional[str]"
) -> "Dict[str, List[Tuple[Point, str]]]":
"""Return a dictionary that groups references by the file it belongs."""
grouped_references = {} # type: Dict[str, List[Tuple[Point, str]]]
for reference in references:
file_path = uri_to_filename(reference["uri"])
point = Point.from_lsp(reference["range"]["start"])
# get line of the reference, to showcase its use
reference_line = linecache.getline(file_path, point.row + 1).strip()
if base_dir:
file_path = os.path.relpath(file_path, base_dir)
if grouped_references.get(file_path) is None:
grouped_references[file_path] = []
grouped_references[file_path].append((point, reference_line))
# we don't want to cache the line, we always want to get fresh data
linecache.clearcache()
return grouped_references
|
https://github.com/sublimelsp/LSP/issues/727
|
LSP: --> textDocument/references
LSP: [{'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 25, 'character': 20}, 'start': {'line': 25, 'character': 9}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/extension.ts', 'range': {'end': {'line': 434, 'character': 25}, 'start': {'line': 434, 'character': 14}}}, {'uri': 'file:///D:/Amjad/rls-vscode/src/rustup.ts', 'range': {'end': {'line': 67, 'character': 33}, 'start': {'line': 67, 'character': 22}}}]
Error handling server payload
Traceback (most recent call last):
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 150, in receive_payload
self.response_handler(payload)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\core\rpc.py", line 169, in response_handler
handler(response["result"])
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 45, in <lambda>
request, lambda response: self.handle_response(response, pos))
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 64, in handle_response
formatted_references = self._get_formatted_references(response, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 142, in _get_formatted_references
grouped_references = self._group_references_by_file(references, base_dir)
File "C:\Users\Lehdhili\AppData\Roaming\Sublime Text 3\Packages\LSP\plugin\references.py", line 150, in _group_references_by_file
relative_file_path = os.path.relpath(file_path, base_dir)
File "./python3.3/ntpath.py", line 564, in relpath
ValueError: path is on mount 'D:', start on mount 'C:'
|
ValueError
|
def __init__(
self,
window: WindowLike,
configs: ConfigRegistry,
documents: DocumentHandler,
diagnostics: WindowDiagnostics,
session_starter: "Callable",
sublime: "Any",
handler_dispatcher,
on_closed: "Optional[Callable]" = None,
) -> None:
# to move here:
# configurations.py: window_client_configs and all references
self._window = window
self._configs = configs
self._diagnostics = diagnostics
self._documents = documents
self._sessions = dict() # type: Dict[str, Session]
self._start_session = session_starter
self._sublime = sublime
self._handlers = handler_dispatcher
self._restarting = False
self._project_path = get_project_path(self._window)
self._projectless_root_path = None # type: Optional[str]
self._diagnostics.set_on_updated(
lambda file_path, client_name: global_events.publish(
"document.diagnostics",
DiagnosticsUpdate(self._window, client_name, file_path),
)
)
self._on_closed = on_closed
self._is_closing = False
self._initialization_lock = threading.Lock()
|
def __init__(
self,
window: WindowLike,
configs: ConfigRegistry,
documents: DocumentHandler,
diagnostics: WindowDiagnostics,
session_starter: "Callable",
sublime: "Any",
handler_dispatcher,
on_closed: "Optional[Callable]" = None,
) -> None:
# to move here:
# configurations.py: window_client_configs and all references
self._window = window
self._configs = configs
self._diagnostics = diagnostics
self._documents = documents
self._sessions = dict() # type: Dict[str, Session]
self._start_session = session_starter
self._sublime = sublime
self._handlers = handler_dispatcher
self._restarting = False
self._project_path = get_project_path(self._window)
self._diagnostics.set_on_updated(
lambda file_path, client_name: global_events.publish(
"document.diagnostics",
DiagnosticsUpdate(self._window, client_name, file_path),
)
)
self._on_closed = on_closed
self._is_closing = False
self._initialization_lock = threading.Lock()
|
https://github.com/sublimelsp/LSP/issues/668
|
startup, version: 3207 osx x64 channel: stable
executable: /Applications/Sublime Text.app/Contents/MacOS/Sublime Text
working dir: /
packages path: /Users/perm/Library/Application Support/Sublime Text 3/Packages
state path: /Users/perm/Library/Application Support/Sublime Text 3/Local
zip path: /Applications/Sublime Text.app/Contents/MacOS/Packages
zip path: /Users/perm/Library/Application Support/Sublime Text 3/Installed Packages
ignored_packages: ["Vintage"]
pre session restore time: 0.181761
using gpu buffer for window
startup time: 0.258246
environment variables loaded using: /bin/bash -l
reloading plugin Default.arithmetic
reloading plugin Default.auto_indent_tag
reloading plugin Default.block
reloading plugin Default.colors
reloading plugin Default.comment
reloading plugin Default.convert_color_scheme
reloading plugin Default.convert_syntax
reloading plugin Default.copy_path
reloading plugin Default.detect_indentation
reloading plugin Default.echo
reloading plugin Default.exec
reloading plugin Default.fold
reloading plugin Default.font
reloading plugin Default.goto_line
reloading plugin Default.history_list
reloading plugin Default.indentation
reloading plugin Default.install_package_control
reloading plugin Default.kill_ring
reloading plugin Default.mark
reloading plugin Default.new_templates
reloading plugin Default.open_context_url
reloading plugin Default.open_in_browser
reloading plugin Default.pane
reloading plugin Default.paragraph
reloading plugin Default.paste_from_history
reloading plugin Default.profile
reloading plugin Default.quick_panel
reloading plugin Default.rename
reloading plugin Default.run_syntax_tests
reloading plugin Default.save_on_focus_lost
reloading plugin Default.scroll
reloading plugin Default.set_unsaved_view_name
reloading plugin Default.settings
reloading plugin Default.show_scope_name
reloading plugin Default.side_bar
reloading plugin Default.sort
reloading plugin Default.switch_file
reloading plugin Default.symbol
reloading plugin Default.transform
reloading plugin Default.transpose
reloading plugin Default.ui
reloading plugin CSS.css_completions
reloading plugin Diff.diff
reloading plugin HTML.encode_html_entities
reloading plugin HTML.html_completions
reloading plugin ShellScript.ShellScript
reloading plugin 0_package_control_loader.00-package_control
reloading plugin 0_package_control_loader.01-pygments
reloading plugin 0_package_control_loader.50-markupsafe
reloading plugin 0_package_control_loader.50-pymdownx
reloading plugin 0_package_control_loader.50-python-markdown
reloading plugin 0_package_control_loader.50-pyyaml
reloading plugin 0_package_control_loader.51-python-jinja2
reloading plugin 0_package_control_loader.55-mdpopups
reloading plugin LSP.boot
reloading plugin Package Control.1_reloader
reloading plugin Package Control.2_bootstrap
reloading plugin Package Control.Package Control
plugins loaded
LSP: global configs ['jdtls=False', 'phpls=False', 'flow=False', 'reason=False', 'cquery=False', 'clangd=False', 'lsp-tsserver=False', 'eslint=False', 'ocaml=False', 'ruby=False', 'javascript-typescript-langserver=False', 'pyls=False', 'haskell-ide-engine=False', 'ra-lsp=False', 'polymer-ide=False', 'gopls=False', 'vscode-css=False', 'intelephense-ls=False', 'metals=False', 'typescript-language-server=False', 'golsp=False', 'dart=False', 'spider-nc=True', 'rls=False', 'bashls=False']
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: window 2 starting 1 initial views
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: project path changed, ending existing sessions
LSP: new path = /Users/perm/sicstus/lsptestfolder
LSP: unloading session spider-nc
LSP: --> shutdown
Package Control: Skipping automatic upgrade, last run at 2019-08-01 16:21:54, next run at 2019-08-01 17:21:54 or after
LSP: {'capabilities': {'documentFormattingProvider': True, 'implementationProvider': False, 'codeActionProvider': False, 'callHierarchyProvider': False, 'renameProvider': True, 'typeDefinitionProvider': False, 'workspaceSymbolProvider': True, 'definitionProvider': True, 'documentRangeFormattingProvider': True, 'workspace': {'workspaceFolders': {'supported': True, 'changeNotifications': True}}, 'typeHierarchyProvider': False, 'documentHighlightProvider': True, 'referencesProvider': True, 'textDocumentSync': 2, 'hoverProvider': True, 'documentSymbolProvider': True, 'completionProvider': {'triggerCharacters': ['(']}}}
LSP: --> initialized
LSP: --> textDocument/didOpen
LSP: None
LSP: --> exit
LSP: session spider-nc ended
LSP: clients for window 2 unloaded
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: LSP stdout process ended.
using gpu buffer for window
Unable to open /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: window 3 requests spider-nc for /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: starting in /Users/perm/Library/Application Support/Sublime Text 3/Packages/User
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 3 added session spider-nc
LSP: LSP stdout process ended.
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: project path changed, ending existing sessions
LSP: new path = None
LSP: unloading session spider-nc
LSP: --> shutdown
Failure writing to stdout
Traceback (most recent call last):
File "/Users/perm/Library/Application Support/Sublime Text 3/Installed Packages/LSP.sublime-package/plugin/core/transports.py", line 227, in write_stdin
self.process.stdin.flush()
BrokenPipeError: [Errno 32] Broken pipe
LSP: Communication to server closed, exiting
LSP: transport failed
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
|
BrokenPipeError
|
def _start_client(self, config: ClientConfig):
project_path = self._ensure_project_path()
if project_path is None:
debug("Cannot start without a project folder")
return
if not self._can_start_config(config.name):
debug("Already starting on this window:", config.name)
return
if not self._handlers.on_start(config.name, self._window):
return
self._window.status_message("Starting " + config.name + "...")
debug("starting in", project_path)
session = None # type: Optional[Session]
try:
session = self._start_session(
window=self._window,
project_path=project_path,
config=config,
on_pre_initialize=self._handle_pre_initialize,
on_post_initialize=self._handle_post_initialize,
on_post_exit=self._handle_post_exit,
)
except Exception as e:
message = "\n\n".join(
["Could not start {}", "{}", "Server will be disabled for this window"]
).format(config.name, str(e))
self._configs.disable(config.name)
self._sublime.message_dialog(message)
if session:
debug("window {} added session {}".format(self._window.id(), config.name))
self._sessions[config.name] = session
|
def _start_client(self, config: ClientConfig):
project_path = get_project_path(self._window)
if project_path is None:
debug("Cannot start without a project folder")
return
if not self._can_start_config(config.name):
debug("Already starting on this window:", config.name)
return
if not self._handlers.on_start(config.name, self._window):
return
self._window.status_message("Starting " + config.name + "...")
debug("starting in", project_path)
session = None # type: Optional[Session]
try:
session = self._start_session(
window=self._window,
project_path=project_path,
config=config,
on_pre_initialize=self._handle_pre_initialize,
on_post_initialize=self._handle_post_initialize,
on_post_exit=self._handle_post_exit,
)
except Exception as e:
message = "\n\n".join(
["Could not start {}", "{}", "Server will be disabled for this window"]
).format(config.name, str(e))
self._configs.disable(config.name)
self._sublime.message_dialog(message)
if session:
debug("window {} added session {}".format(self._window.id(), config.name))
self._sessions[config.name] = session
|
https://github.com/sublimelsp/LSP/issues/668
|
startup, version: 3207 osx x64 channel: stable
executable: /Applications/Sublime Text.app/Contents/MacOS/Sublime Text
working dir: /
packages path: /Users/perm/Library/Application Support/Sublime Text 3/Packages
state path: /Users/perm/Library/Application Support/Sublime Text 3/Local
zip path: /Applications/Sublime Text.app/Contents/MacOS/Packages
zip path: /Users/perm/Library/Application Support/Sublime Text 3/Installed Packages
ignored_packages: ["Vintage"]
pre session restore time: 0.181761
using gpu buffer for window
startup time: 0.258246
environment variables loaded using: /bin/bash -l
reloading plugin Default.arithmetic
reloading plugin Default.auto_indent_tag
reloading plugin Default.block
reloading plugin Default.colors
reloading plugin Default.comment
reloading plugin Default.convert_color_scheme
reloading plugin Default.convert_syntax
reloading plugin Default.copy_path
reloading plugin Default.detect_indentation
reloading plugin Default.echo
reloading plugin Default.exec
reloading plugin Default.fold
reloading plugin Default.font
reloading plugin Default.goto_line
reloading plugin Default.history_list
reloading plugin Default.indentation
reloading plugin Default.install_package_control
reloading plugin Default.kill_ring
reloading plugin Default.mark
reloading plugin Default.new_templates
reloading plugin Default.open_context_url
reloading plugin Default.open_in_browser
reloading plugin Default.pane
reloading plugin Default.paragraph
reloading plugin Default.paste_from_history
reloading plugin Default.profile
reloading plugin Default.quick_panel
reloading plugin Default.rename
reloading plugin Default.run_syntax_tests
reloading plugin Default.save_on_focus_lost
reloading plugin Default.scroll
reloading plugin Default.set_unsaved_view_name
reloading plugin Default.settings
reloading plugin Default.show_scope_name
reloading plugin Default.side_bar
reloading plugin Default.sort
reloading plugin Default.switch_file
reloading plugin Default.symbol
reloading plugin Default.transform
reloading plugin Default.transpose
reloading plugin Default.ui
reloading plugin CSS.css_completions
reloading plugin Diff.diff
reloading plugin HTML.encode_html_entities
reloading plugin HTML.html_completions
reloading plugin ShellScript.ShellScript
reloading plugin 0_package_control_loader.00-package_control
reloading plugin 0_package_control_loader.01-pygments
reloading plugin 0_package_control_loader.50-markupsafe
reloading plugin 0_package_control_loader.50-pymdownx
reloading plugin 0_package_control_loader.50-python-markdown
reloading plugin 0_package_control_loader.50-pyyaml
reloading plugin 0_package_control_loader.51-python-jinja2
reloading plugin 0_package_control_loader.55-mdpopups
reloading plugin LSP.boot
reloading plugin Package Control.1_reloader
reloading plugin Package Control.2_bootstrap
reloading plugin Package Control.Package Control
plugins loaded
LSP: global configs ['jdtls=False', 'phpls=False', 'flow=False', 'reason=False', 'cquery=False', 'clangd=False', 'lsp-tsserver=False', 'eslint=False', 'ocaml=False', 'ruby=False', 'javascript-typescript-langserver=False', 'pyls=False', 'haskell-ide-engine=False', 'ra-lsp=False', 'polymer-ide=False', 'gopls=False', 'vscode-css=False', 'intelephense-ls=False', 'metals=False', 'typescript-language-server=False', 'golsp=False', 'dart=False', 'spider-nc=True', 'rls=False', 'bashls=False']
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: window 2 starting 1 initial views
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: project path changed, ending existing sessions
LSP: new path = /Users/perm/sicstus/lsptestfolder
LSP: unloading session spider-nc
LSP: --> shutdown
Package Control: Skipping automatic upgrade, last run at 2019-08-01 16:21:54, next run at 2019-08-01 17:21:54 or after
LSP: {'capabilities': {'documentFormattingProvider': True, 'implementationProvider': False, 'codeActionProvider': False, 'callHierarchyProvider': False, 'renameProvider': True, 'typeDefinitionProvider': False, 'workspaceSymbolProvider': True, 'definitionProvider': True, 'documentRangeFormattingProvider': True, 'workspace': {'workspaceFolders': {'supported': True, 'changeNotifications': True}}, 'typeHierarchyProvider': False, 'documentHighlightProvider': True, 'referencesProvider': True, 'textDocumentSync': 2, 'hoverProvider': True, 'documentSymbolProvider': True, 'completionProvider': {'triggerCharacters': ['(']}}}
LSP: --> initialized
LSP: --> textDocument/didOpen
LSP: None
LSP: --> exit
LSP: session spider-nc ended
LSP: clients for window 2 unloaded
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: LSP stdout process ended.
using gpu buffer for window
Unable to open /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: window 3 requests spider-nc for /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: starting in /Users/perm/Library/Application Support/Sublime Text 3/Packages/User
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 3 added session spider-nc
LSP: LSP stdout process ended.
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: project path changed, ending existing sessions
LSP: new path = None
LSP: unloading session spider-nc
LSP: --> shutdown
Failure writing to stdout
Traceback (most recent call last):
File "/Users/perm/Library/Application Support/Sublime Text 3/Installed Packages/LSP.sublime-package/plugin/core/transports.py", line 227, in write_stdin
self.process.stdin.flush()
BrokenPipeError: [Errno 32] Broken pipe
LSP: Communication to server closed, exiting
LSP: transport failed
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
|
BrokenPipeError
|
def get_project_path(window: "Any") -> "Optional[str]":
"""
Returns the first project folder
"""
if len(window.folders()):
folder_paths = window.folders()
return folder_paths[0]
return None
|
def get_project_path(window: "Any") -> "Optional[str]":
"""
Returns the first project folder or the parent folder of the active view
"""
if len(window.folders()):
folder_paths = window.folders()
return folder_paths[0]
else:
view = window.active_view()
if view:
filename = view.file_name()
if filename and os.path.exists(
filename
): # https://github.com/tomv564/LSP/issues/644
project_path = os.path.dirname(filename)
debug(
"Couldn't determine project directory since no folders are open!",
"Using",
project_path,
"as a fallback.",
)
return project_path
else:
debug(
"Couldn't determine project directory since no folders are open",
"and the current file isn't saved on the disk.",
)
return None
else:
debug("No view is active in current window")
return None # https://github.com/tomv564/LSP/issues/219
|
https://github.com/sublimelsp/LSP/issues/668
|
startup, version: 3207 osx x64 channel: stable
executable: /Applications/Sublime Text.app/Contents/MacOS/Sublime Text
working dir: /
packages path: /Users/perm/Library/Application Support/Sublime Text 3/Packages
state path: /Users/perm/Library/Application Support/Sublime Text 3/Local
zip path: /Applications/Sublime Text.app/Contents/MacOS/Packages
zip path: /Users/perm/Library/Application Support/Sublime Text 3/Installed Packages
ignored_packages: ["Vintage"]
pre session restore time: 0.181761
using gpu buffer for window
startup time: 0.258246
environment variables loaded using: /bin/bash -l
reloading plugin Default.arithmetic
reloading plugin Default.auto_indent_tag
reloading plugin Default.block
reloading plugin Default.colors
reloading plugin Default.comment
reloading plugin Default.convert_color_scheme
reloading plugin Default.convert_syntax
reloading plugin Default.copy_path
reloading plugin Default.detect_indentation
reloading plugin Default.echo
reloading plugin Default.exec
reloading plugin Default.fold
reloading plugin Default.font
reloading plugin Default.goto_line
reloading plugin Default.history_list
reloading plugin Default.indentation
reloading plugin Default.install_package_control
reloading plugin Default.kill_ring
reloading plugin Default.mark
reloading plugin Default.new_templates
reloading plugin Default.open_context_url
reloading plugin Default.open_in_browser
reloading plugin Default.pane
reloading plugin Default.paragraph
reloading plugin Default.paste_from_history
reloading plugin Default.profile
reloading plugin Default.quick_panel
reloading plugin Default.rename
reloading plugin Default.run_syntax_tests
reloading plugin Default.save_on_focus_lost
reloading plugin Default.scroll
reloading plugin Default.set_unsaved_view_name
reloading plugin Default.settings
reloading plugin Default.show_scope_name
reloading plugin Default.side_bar
reloading plugin Default.sort
reloading plugin Default.switch_file
reloading plugin Default.symbol
reloading plugin Default.transform
reloading plugin Default.transpose
reloading plugin Default.ui
reloading plugin CSS.css_completions
reloading plugin Diff.diff
reloading plugin HTML.encode_html_entities
reloading plugin HTML.html_completions
reloading plugin ShellScript.ShellScript
reloading plugin 0_package_control_loader.00-package_control
reloading plugin 0_package_control_loader.01-pygments
reloading plugin 0_package_control_loader.50-markupsafe
reloading plugin 0_package_control_loader.50-pymdownx
reloading plugin 0_package_control_loader.50-python-markdown
reloading plugin 0_package_control_loader.50-pyyaml
reloading plugin 0_package_control_loader.51-python-jinja2
reloading plugin 0_package_control_loader.55-mdpopups
reloading plugin LSP.boot
reloading plugin Package Control.1_reloader
reloading plugin Package Control.2_bootstrap
reloading plugin Package Control.Package Control
plugins loaded
LSP: global configs ['jdtls=False', 'phpls=False', 'flow=False', 'reason=False', 'cquery=False', 'clangd=False', 'lsp-tsserver=False', 'eslint=False', 'ocaml=False', 'ruby=False', 'javascript-typescript-langserver=False', 'pyls=False', 'haskell-ide-engine=False', 'ra-lsp=False', 'polymer-ide=False', 'gopls=False', 'vscode-css=False', 'intelephense-ls=False', 'metals=False', 'typescript-language-server=False', 'golsp=False', 'dart=False', 'spider-nc=True', 'rls=False', 'bashls=False']
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: window 2 starting 1 initial views
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: project path changed, ending existing sessions
LSP: new path = /Users/perm/sicstus/lsptestfolder
LSP: unloading session spider-nc
LSP: --> shutdown
Package Control: Skipping automatic upgrade, last run at 2019-08-01 16:21:54, next run at 2019-08-01 17:21:54 or after
LSP: {'capabilities': {'documentFormattingProvider': True, 'implementationProvider': False, 'codeActionProvider': False, 'callHierarchyProvider': False, 'renameProvider': True, 'typeDefinitionProvider': False, 'workspaceSymbolProvider': True, 'definitionProvider': True, 'documentRangeFormattingProvider': True, 'workspace': {'workspaceFolders': {'supported': True, 'changeNotifications': True}}, 'typeHierarchyProvider': False, 'documentHighlightProvider': True, 'referencesProvider': True, 'textDocumentSync': 2, 'hoverProvider': True, 'documentSymbolProvider': True, 'completionProvider': {'triggerCharacters': ['(']}}}
LSP: --> initialized
LSP: --> textDocument/didOpen
LSP: None
LSP: --> exit
LSP: session spider-nc ended
LSP: clients for window 2 unloaded
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: LSP stdout process ended.
using gpu buffer for window
Unable to open /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: window 3 requests spider-nc for /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: starting in /Users/perm/Library/Application Support/Sublime Text 3/Packages/User
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 3 added session spider-nc
LSP: LSP stdout process ended.
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: project path changed, ending existing sessions
LSP: new path = None
LSP: unloading session spider-nc
LSP: --> shutdown
Failure writing to stdout
Traceback (most recent call last):
File "/Users/perm/Library/Application Support/Sublime Text 3/Installed Packages/LSP.sublime-package/plugin/core/transports.py", line 227, in write_stdin
self.process.stdin.flush()
BrokenPipeError: [Errno 32] Broken pipe
LSP: Communication to server closed, exiting
LSP: transport failed
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
|
BrokenPipeError
|
def update_diagnostics_panel(window: sublime.Window):
assert window, "missing window!"
if not window.is_valid():
debug("ignoring update to closed window")
return
base_dir = windows.lookup(window).get_project_path()
diagnostics_by_file = get_window_diagnostics(window)
if diagnostics_by_file is not None:
active_panel = window.active_panel()
is_active_panel = active_panel == "output.diagnostics"
if diagnostics_by_file:
panel = ensure_diagnostics_panel(window)
assert panel, "must have a panel now!"
panel.settings().set("result_base_dir", base_dir)
auto_open_panel = False
to_render = []
for file_path, source_diagnostics in diagnostics_by_file.items():
try:
relative_file_path = (
os.path.relpath(file_path, base_dir) if base_dir else file_path
)
except ValueError:
relative_file_path = file_path
if source_diagnostics:
formatted = format_diagnostics(
relative_file_path, source_diagnostics
)
if formatted:
to_render.append(formatted)
if not auto_open_panel:
auto_open_panel = has_relevant_diagnostics(
source_diagnostics
)
panel.set_read_only(False)
panel.run_command("lsp_update_panel", {"characters": "\n".join(to_render)})
panel.set_read_only(True)
if settings.auto_show_diagnostics_panel and not active_panel:
if auto_open_panel:
window.run_command("show_panel", {"panel": "output.diagnostics"})
else:
panel = window.find_output_panel("diagnostics")
if panel:
panel.run_command("lsp_clear_panel")
if is_active_panel:
window.run_command("hide_panel", {"panel": "output.diagnostics"})
|
def update_diagnostics_panel(window: sublime.Window):
assert window, "missing window!"
if not window.is_valid():
debug("ignoring update to closed window")
return
base_dir = get_project_path(window)
diagnostics_by_file = get_window_diagnostics(window)
if diagnostics_by_file is not None:
active_panel = window.active_panel()
is_active_panel = active_panel == "output.diagnostics"
if diagnostics_by_file:
panel = ensure_diagnostics_panel(window)
assert panel, "must have a panel now!"
panel.settings().set("result_base_dir", base_dir)
auto_open_panel = False
to_render = []
for file_path, source_diagnostics in diagnostics_by_file.items():
try:
relative_file_path = (
os.path.relpath(file_path, base_dir) if base_dir else file_path
)
except ValueError:
relative_file_path = file_path
if source_diagnostics:
formatted = format_diagnostics(
relative_file_path, source_diagnostics
)
if formatted:
to_render.append(formatted)
if not auto_open_panel:
auto_open_panel = has_relevant_diagnostics(
source_diagnostics
)
panel.set_read_only(False)
panel.run_command("lsp_update_panel", {"characters": "\n".join(to_render)})
panel.set_read_only(True)
if settings.auto_show_diagnostics_panel and not active_panel:
if auto_open_panel:
window.run_command("show_panel", {"panel": "output.diagnostics"})
else:
panel = window.find_output_panel("diagnostics")
if panel:
panel.run_command("lsp_clear_panel")
if is_active_panel:
window.run_command("hide_panel", {"panel": "output.diagnostics"})
|
https://github.com/sublimelsp/LSP/issues/668
|
startup, version: 3207 osx x64 channel: stable
executable: /Applications/Sublime Text.app/Contents/MacOS/Sublime Text
working dir: /
packages path: /Users/perm/Library/Application Support/Sublime Text 3/Packages
state path: /Users/perm/Library/Application Support/Sublime Text 3/Local
zip path: /Applications/Sublime Text.app/Contents/MacOS/Packages
zip path: /Users/perm/Library/Application Support/Sublime Text 3/Installed Packages
ignored_packages: ["Vintage"]
pre session restore time: 0.181761
using gpu buffer for window
startup time: 0.258246
environment variables loaded using: /bin/bash -l
reloading plugin Default.arithmetic
reloading plugin Default.auto_indent_tag
reloading plugin Default.block
reloading plugin Default.colors
reloading plugin Default.comment
reloading plugin Default.convert_color_scheme
reloading plugin Default.convert_syntax
reloading plugin Default.copy_path
reloading plugin Default.detect_indentation
reloading plugin Default.echo
reloading plugin Default.exec
reloading plugin Default.fold
reloading plugin Default.font
reloading plugin Default.goto_line
reloading plugin Default.history_list
reloading plugin Default.indentation
reloading plugin Default.install_package_control
reloading plugin Default.kill_ring
reloading plugin Default.mark
reloading plugin Default.new_templates
reloading plugin Default.open_context_url
reloading plugin Default.open_in_browser
reloading plugin Default.pane
reloading plugin Default.paragraph
reloading plugin Default.paste_from_history
reloading plugin Default.profile
reloading plugin Default.quick_panel
reloading plugin Default.rename
reloading plugin Default.run_syntax_tests
reloading plugin Default.save_on_focus_lost
reloading plugin Default.scroll
reloading plugin Default.set_unsaved_view_name
reloading plugin Default.settings
reloading plugin Default.show_scope_name
reloading plugin Default.side_bar
reloading plugin Default.sort
reloading plugin Default.switch_file
reloading plugin Default.symbol
reloading plugin Default.transform
reloading plugin Default.transpose
reloading plugin Default.ui
reloading plugin CSS.css_completions
reloading plugin Diff.diff
reloading plugin HTML.encode_html_entities
reloading plugin HTML.html_completions
reloading plugin ShellScript.ShellScript
reloading plugin 0_package_control_loader.00-package_control
reloading plugin 0_package_control_loader.01-pygments
reloading plugin 0_package_control_loader.50-markupsafe
reloading plugin 0_package_control_loader.50-pymdownx
reloading plugin 0_package_control_loader.50-python-markdown
reloading plugin 0_package_control_loader.50-pyyaml
reloading plugin 0_package_control_loader.51-python-jinja2
reloading plugin 0_package_control_loader.55-mdpopups
reloading plugin LSP.boot
reloading plugin Package Control.1_reloader
reloading plugin Package Control.2_bootstrap
reloading plugin Package Control.Package Control
plugins loaded
LSP: global configs ['jdtls=False', 'phpls=False', 'flow=False', 'reason=False', 'cquery=False', 'clangd=False', 'lsp-tsserver=False', 'eslint=False', 'ocaml=False', 'ruby=False', 'javascript-typescript-langserver=False', 'pyls=False', 'haskell-ide-engine=False', 'ra-lsp=False', 'polymer-ide=False', 'gopls=False', 'vscode-css=False', 'intelephense-ls=False', 'metals=False', 'typescript-language-server=False', 'golsp=False', 'dart=False', 'spider-nc=True', 'rls=False', 'bashls=False']
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: window 2 starting 1 initial views
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: project path changed, ending existing sessions
LSP: new path = /Users/perm/sicstus/lsptestfolder
LSP: unloading session spider-nc
LSP: --> shutdown
Package Control: Skipping automatic upgrade, last run at 2019-08-01 16:21:54, next run at 2019-08-01 17:21:54 or after
LSP: {'capabilities': {'documentFormattingProvider': True, 'implementationProvider': False, 'codeActionProvider': False, 'callHierarchyProvider': False, 'renameProvider': True, 'typeDefinitionProvider': False, 'workspaceSymbolProvider': True, 'definitionProvider': True, 'documentRangeFormattingProvider': True, 'workspace': {'workspaceFolders': {'supported': True, 'changeNotifications': True}}, 'typeHierarchyProvider': False, 'documentHighlightProvider': True, 'referencesProvider': True, 'textDocumentSync': 2, 'hoverProvider': True, 'documentSymbolProvider': True, 'completionProvider': {'triggerCharacters': ['(']}}}
LSP: --> initialized
LSP: --> textDocument/didOpen
LSP: None
LSP: --> exit
LSP: session spider-nc ended
LSP: clients for window 2 unloaded
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: LSP stdout process ended.
using gpu buffer for window
Unable to open /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: window 3 requests spider-nc for /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: starting in /Users/perm/Library/Application Support/Sublime Text 3/Packages/User
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 3 added session spider-nc
LSP: LSP stdout process ended.
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: project path changed, ending existing sessions
LSP: new path = None
LSP: unloading session spider-nc
LSP: --> shutdown
Failure writing to stdout
Traceback (most recent call last):
File "/Users/perm/Library/Application Support/Sublime Text 3/Installed Packages/LSP.sublime-package/plugin/core/transports.py", line 227, in write_stdin
self.process.stdin.flush()
BrokenPipeError: [Errno 32] Broken pipe
LSP: Communication to server closed, exiting
LSP: transport failed
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
|
BrokenPipeError
|
def handle_response(self, response: "Optional[List[Dict]]", pos) -> None:
window = self.view.window()
if response is None:
response = []
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
word_region = self.view.word(pos)
word = self.view.substr(word_region)
base_dir = windows.lookup(window).get_project_path()
formatted_references = self._get_formatted_references(response, base_dir)
if settings.show_references_in_quick_panel:
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
if settings.quick_panel_monospace_font:
flags |= sublime.MONOSPACE_FONT
window.show_quick_panel(
self.reflist,
lambda index: self.on_ref_choice(base_dir, index),
flags,
self.get_current_ref(base_dir, word_region.begin()),
lambda index: self.on_ref_highlight(base_dir, index),
)
else:
panel = ensure_references_panel(window)
if not panel:
return
panel.settings().set("result_base_dir", base_dir)
panel.set_read_only(False)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command(
"append",
{
"characters": "{} references for '{}'\n\n{}".format(
references_count, word, formatted_references
),
"force": True,
"scroll_to_end": False,
},
)
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(word))
panel.add_regions(
"ReferenceHighlight", regions, "comment", flags=sublime.DRAW_OUTLINED
)
panel.set_read_only(True)
|
def handle_response(self, response: "Optional[List[Dict]]", pos) -> None:
window = self.view.window()
if response is None:
response = []
references_count = len(response)
# return if there are no references
if references_count < 1:
window.run_command("hide_panel", {"panel": "output.references"})
window.status_message("No references found")
return
word_region = self.view.word(pos)
word = self.view.substr(word_region)
base_dir = get_project_path(window)
formatted_references = self._get_formatted_references(response, base_dir)
if settings.show_references_in_quick_panel:
flags = sublime.KEEP_OPEN_ON_FOCUS_LOST
if settings.quick_panel_monospace_font:
flags |= sublime.MONOSPACE_FONT
window.show_quick_panel(
self.reflist,
lambda index: self.on_ref_choice(base_dir, index),
flags,
self.get_current_ref(base_dir, word_region.begin()),
lambda index: self.on_ref_highlight(base_dir, index),
)
else:
panel = ensure_references_panel(window)
if not panel:
return
panel.settings().set("result_base_dir", base_dir)
panel.set_read_only(False)
panel.run_command("lsp_clear_panel")
window.run_command("show_panel", {"panel": "output.references"})
panel.run_command(
"append",
{
"characters": "{} references for '{}'\n\n{}".format(
references_count, word, formatted_references
),
"force": True,
"scroll_to_end": False,
},
)
# highlight all word occurrences
regions = panel.find_all(r"\b{}\b".format(word))
panel.add_regions(
"ReferenceHighlight", regions, "comment", flags=sublime.DRAW_OUTLINED
)
panel.set_read_only(True)
|
https://github.com/sublimelsp/LSP/issues/668
|
startup, version: 3207 osx x64 channel: stable
executable: /Applications/Sublime Text.app/Contents/MacOS/Sublime Text
working dir: /
packages path: /Users/perm/Library/Application Support/Sublime Text 3/Packages
state path: /Users/perm/Library/Application Support/Sublime Text 3/Local
zip path: /Applications/Sublime Text.app/Contents/MacOS/Packages
zip path: /Users/perm/Library/Application Support/Sublime Text 3/Installed Packages
ignored_packages: ["Vintage"]
pre session restore time: 0.181761
using gpu buffer for window
startup time: 0.258246
environment variables loaded using: /bin/bash -l
reloading plugin Default.arithmetic
reloading plugin Default.auto_indent_tag
reloading plugin Default.block
reloading plugin Default.colors
reloading plugin Default.comment
reloading plugin Default.convert_color_scheme
reloading plugin Default.convert_syntax
reloading plugin Default.copy_path
reloading plugin Default.detect_indentation
reloading plugin Default.echo
reloading plugin Default.exec
reloading plugin Default.fold
reloading plugin Default.font
reloading plugin Default.goto_line
reloading plugin Default.history_list
reloading plugin Default.indentation
reloading plugin Default.install_package_control
reloading plugin Default.kill_ring
reloading plugin Default.mark
reloading plugin Default.new_templates
reloading plugin Default.open_context_url
reloading plugin Default.open_in_browser
reloading plugin Default.pane
reloading plugin Default.paragraph
reloading plugin Default.paste_from_history
reloading plugin Default.profile
reloading plugin Default.quick_panel
reloading plugin Default.rename
reloading plugin Default.run_syntax_tests
reloading plugin Default.save_on_focus_lost
reloading plugin Default.scroll
reloading plugin Default.set_unsaved_view_name
reloading plugin Default.settings
reloading plugin Default.show_scope_name
reloading plugin Default.side_bar
reloading plugin Default.sort
reloading plugin Default.switch_file
reloading plugin Default.symbol
reloading plugin Default.transform
reloading plugin Default.transpose
reloading plugin Default.ui
reloading plugin CSS.css_completions
reloading plugin Diff.diff
reloading plugin HTML.encode_html_entities
reloading plugin HTML.html_completions
reloading plugin ShellScript.ShellScript
reloading plugin 0_package_control_loader.00-package_control
reloading plugin 0_package_control_loader.01-pygments
reloading plugin 0_package_control_loader.50-markupsafe
reloading plugin 0_package_control_loader.50-pymdownx
reloading plugin 0_package_control_loader.50-python-markdown
reloading plugin 0_package_control_loader.50-pyyaml
reloading plugin 0_package_control_loader.51-python-jinja2
reloading plugin 0_package_control_loader.55-mdpopups
reloading plugin LSP.boot
reloading plugin Package Control.1_reloader
reloading plugin Package Control.2_bootstrap
reloading plugin Package Control.Package Control
plugins loaded
LSP: global configs ['jdtls=False', 'phpls=False', 'flow=False', 'reason=False', 'cquery=False', 'clangd=False', 'lsp-tsserver=False', 'eslint=False', 'ocaml=False', 'ruby=False', 'javascript-typescript-langserver=False', 'pyls=False', 'haskell-ide-engine=False', 'ra-lsp=False', 'polymer-ide=False', 'gopls=False', 'vscode-css=False', 'intelephense-ls=False', 'metals=False', 'typescript-language-server=False', 'golsp=False', 'dart=False', 'spider-nc=True', 'rls=False', 'bashls=False']
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: window 2 starting 1 initial views
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: project path changed, ending existing sessions
LSP: new path = /Users/perm/sicstus/lsptestfolder
LSP: unloading session spider-nc
LSP: --> shutdown
Package Control: Skipping automatic upgrade, last run at 2019-08-01 16:21:54, next run at 2019-08-01 17:21:54 or after
LSP: {'capabilities': {'documentFormattingProvider': True, 'implementationProvider': False, 'codeActionProvider': False, 'callHierarchyProvider': False, 'renameProvider': True, 'typeDefinitionProvider': False, 'workspaceSymbolProvider': True, 'definitionProvider': True, 'documentRangeFormattingProvider': True, 'workspace': {'workspaceFolders': {'supported': True, 'changeNotifications': True}}, 'typeHierarchyProvider': False, 'documentHighlightProvider': True, 'referencesProvider': True, 'textDocumentSync': 2, 'hoverProvider': True, 'documentSymbolProvider': True, 'completionProvider': {'triggerCharacters': ['(']}}}
LSP: --> initialized
LSP: --> textDocument/didOpen
LSP: None
LSP: --> exit
LSP: session spider-nc ended
LSP: clients for window 2 unloaded
LSP: window 2 requests spider-nc for /Users/perm/sicstus/lsptestfolder/foo.pl
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
LSP: starting in /Users/perm/sicstus/lsptestfolder
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 2 added session spider-nc
LSP: LSP stdout process ended.
using gpu buffer for window
Unable to open /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: window 3 requests spider-nc for /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: starting in /Users/perm/Library/Application Support/Sublime Text 3/Packages/User
LSP: starting ['/usr/bin/nc', 'localhost', '56789']
LSP: --> initialize
LSP: window 3 added session spider-nc
LSP: LSP stdout process ended.
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: no config found or enabled for view /Users/perm/Library/Application Support/Sublime Text 3/Packages/Default/Preferences.sublime-settings
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/Library/Application Support/Sublime Text 3/Packages/User as a fallback.
LSP: Couldn't determine project directory since no folders are open and the current file isn't saved on the disk.
LSP: project path changed, ending existing sessions
LSP: new path = None
LSP: unloading session spider-nc
LSP: --> shutdown
Failure writing to stdout
Traceback (most recent call last):
File "/Users/perm/Library/Application Support/Sublime Text 3/Installed Packages/LSP.sublime-package/plugin/core/transports.py", line 227, in write_stdin
self.process.stdin.flush()
BrokenPipeError: [Errno 32] Broken pipe
LSP: Communication to server closed, exiting
LSP: transport failed
LSP: Couldn't determine project directory since no folders are open! Using /Users/perm/sicstus/lsptestfolder as a fallback.
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
reloading settings Packages/User/Preferences.sublime-settings
|
BrokenPipeError
|
def open_and_apply_edits(self, path, file_changes):
view = self.window.open_file(path)
if view:
if view.is_loading():
# TODO: wait for event instead.
sublime.set_timeout_async(
lambda: view.run_command(
"lsp_apply_document_edit", {"changes": file_changes}
),
500,
)
else:
view.run_command("lsp_apply_document_edit", {"changes": file_changes})
else:
debug("view not found to apply", path, file_changes)
|
def open_and_apply_edits(self, path, file_changes):
view = self.window.open_file(path)
if view:
if view.is_loading():
# TODO: wait for event instead.
sublime.set_timeout_async(
lambda: view.run_command(
"lsp_apply_document_edit", {"changes": file_changes}
),
500,
)
else:
view.run_command(
"lsp_apply_document_edit",
{"changes": file_changes, "show_status": False},
)
else:
debug("view not found to apply", path, file_changes)
|
https://github.com/sublimelsp/LSP/issues/592
|
Traceback (most recent call last):
File "/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py", line 1082, in run_
return self.run(edit, **args)
TypeError: run() got an unexpected keyword argument 'show_status'
|
TypeError
|
def is_applicable(cls, settings):
syntax = settings.get("syntax")
return is_supported_syntax(syntax) if syntax else False
|
def is_applicable(cls, settings):
syntax = settings.get("syntax")
if syntax is not None:
return is_supported_syntax(syntax, client_configs.all)
else:
return False
|
https://github.com/sublimelsp/LSP/issues/532
|
Traceback (most recent call last):
File "F:\SublimeText\sublime_plugin.py", line 298, in on_api_ready
plc()
File "F:\SublimeText\Data\Packages\LSP\boot.py", line 30, in plugin_loaded
startup()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 25, in startup
start_active_window()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\main.py", line 44, in start_active_window
windows.lookup(window).start_active_views()
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 336, in start_active_views
self._initialize_on_open(view)
File "F:\SublimeText\Data\Packages\LSP\plugin\core\windows.py", line 348, in _initialize_on_open
self._configs.syntax_configs(view))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in syntax_configs
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\configurations.py", line 120, in <lambda>
return list(filter(lambda c: config_supports_syntax(c, syntax) and c.enabled, self.all))
File "F:\SublimeText\Data\Packages\LSP\plugin\core\types.py", line 81, in config_supports_syntax
if re.search(r'|'.join(r'\b%s\b' % re.escape(s) for s in language.syntaxes), syntax, re.IGNORECASE):
File "./python3.3/re.py", line 161, in search
TypeError: expected string or buffer
|
TypeError
|
def handle_response(self, response: "Optional[Dict]"):
global resolvable_completion_items
if self.state == CompletionState.REQUESTING:
items = [] # type: List[Dict]
if isinstance(response, dict):
items = response["items"] or []
elif isinstance(response, list):
items = response
items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
self.completions = list(self.format_completion(item) for item in items)
if self.has_resolve_provider:
resolvable_completion_items = items
# if insert_best_completion was just ran, undo it before presenting new completions.
prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
if prev_char.isspace():
if last_text_command == "insert_best_completion":
self.view.run_command("undo")
self.state = CompletionState.APPLYING
self.view.run_command("hide_auto_complete")
self.run_auto_complete()
elif self.state == CompletionState.CANCELLING:
if self.next_request:
prefix, locations = self.next_request
self.do_request(prefix, locations)
self.state = CompletionState.IDLE
else:
debug("Got unexpected response while in state {}".format(self.state))
|
def handle_response(self, response: "Optional[Dict]"):
global resolvable_completion_items
if self.state == CompletionState.REQUESTING:
items = [] # type: List[Dict]
if isinstance(response, dict):
items = response["items"]
elif isinstance(response, list):
items = response
items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
self.completions = list(self.format_completion(item) for item in items)
if self.has_resolve_provider:
resolvable_completion_items = items
# if insert_best_completion was just ran, undo it before presenting new completions.
prev_char = self.view.substr(self.view.sel()[0].begin() - 1)
if prev_char.isspace():
if last_text_command == "insert_best_completion":
self.view.run_command("undo")
self.state = CompletionState.APPLYING
self.view.run_command("hide_auto_complete")
self.run_auto_complete()
elif self.state == CompletionState.CANCELLING:
if self.next_request:
prefix, locations = self.next_request
self.do_request(prefix, locations)
self.state = CompletionState.IDLE
else:
debug("Got unexpected response while in state {}".format(self.state))
|
https://github.com/sublimelsp/LSP/issues/494
|
LSP: --> textDocument/completion
LSP: {'isIncomplete': False, 'items': None}
Error handling server payload
Traceback (most recent call last):
File "/Users/margus/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/core/rpc.py", line 141, in receive_payload
self.response_handler(payload)
File "/Users/margus/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/core/rpc.py", line 160, in response_handler
handler(response["result"])
File "/Users/margus/Library/Application Support/Sublime Text 3/Packages/LSP/plugin/completion.py", line 310, in handle_response
items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def build(
path_source,
path_output,
config,
toc,
warningiserror,
nitpick,
keep_going,
freshenv,
builder,
custom_builder,
verbose,
quiet,
individualpages,
get_config_only=False,
):
"""Convert your book's or page's content to HTML or a PDF."""
from .. import __version__ as jbv
from ..sphinx import build_sphinx
if not get_config_only:
click.secho(f"Running Jupyter-Book v{jbv}", bold=True, fg="green")
# Paths for the notebooks
PATH_SRC_FOLDER = Path(path_source).absolute()
config_overrides = {}
found_config = find_config_path(PATH_SRC_FOLDER)
BUILD_PATH = path_output if path_output is not None else found_config[0]
# Set config for --individualpages option (pages, documents)
if individualpages:
if builder != "pdflatex":
_error(
"""
Specified option --individualpages only works with the
following builders:
pdflatex
"""
)
# Build Page
if not PATH_SRC_FOLDER.is_dir():
# it is a single file
build_type = "page"
subdir = None
PATH_SRC = Path(path_source)
PATH_SRC_FOLDER = PATH_SRC.parent.absolute()
PAGE_NAME = PATH_SRC.with_suffix("").name
# checking if the page is inside a sub directory
# then changing the build_path accordingly
if str(BUILD_PATH) in str(PATH_SRC_FOLDER):
subdir = str(PATH_SRC_FOLDER.relative_to(BUILD_PATH))
if subdir and subdir != ".":
subdir = subdir.replace("/", "-")
subdir = subdir + "-" + PAGE_NAME
BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", subdir)
else:
BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", PAGE_NAME)
# Find all files that *aren't* the page we're building and exclude them
to_exclude = glob(str(PATH_SRC_FOLDER.joinpath("**", "*")), recursive=True)
to_exclude = [
op.relpath(ifile, PATH_SRC_FOLDER)
for ifile in to_exclude
if ifile != str(PATH_SRC.absolute())
]
to_exclude.extend(["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"])
# Now call the Sphinx commands to build
config_overrides = {
"master_doc": PAGE_NAME,
"globaltoc_path": "",
"exclude_patterns": to_exclude,
"html_theme_options": {"single_page": True},
# --individualpages option set to True for page call
"latex_individualpages": True,
}
# Build Project
else:
build_type = "book"
PAGE_NAME = None
BUILD_PATH = Path(BUILD_PATH).joinpath("_build")
# Table of contents
if toc is None:
toc = PATH_SRC_FOLDER.joinpath("_toc.yml")
else:
toc = Path(toc)
if not toc.exists():
_error(
"Couldn't find a Table of Contents file. To auto-generate "
f"one, run\n\n\tjupyter-book toc {path_source}"
)
# Check whether the table of contents has changed. If so we rebuild all
build_files = list(BUILD_PATH.joinpath(".doctrees").rglob("*"))
if toc and build_files:
toc_modified = toc.stat().st_mtime
build_modified = max([os.stat(ii).st_mtime for ii in build_files])
# If the toc file has been modified after the build we need to force rebuild
freshenv = toc_modified > build_modified
config_overrides["globaltoc_path"] = toc.as_posix()
# Builder-specific overrides
if builder == "pdfhtml":
config_overrides["html_theme_options"] = {"single_page": True}
# --individualpages option passthrough
config_overrides["latex_individualpages"] = individualpages
# Use the specified configuration file, or one found in the root directory
path_config = config or (
found_config[0].joinpath("_config.yml") if found_config[1] else None
)
if path_config and not Path(path_config).exists():
raise IOError(f"Config file path given, but not found: {path_config}")
if builder in ["html", "pdfhtml", "linkcheck"]:
OUTPUT_PATH = BUILD_PATH.joinpath("html")
elif builder in ["latex", "pdflatex"]:
OUTPUT_PATH = BUILD_PATH.joinpath("latex")
elif builder in ["dirhtml"]:
OUTPUT_PATH = BUILD_PATH.joinpath("dirhtml")
elif builder in ["custom"]:
OUTPUT_PATH = BUILD_PATH.joinpath(custom_builder)
BUILDER_OPTS["custom"] = custom_builder
if nitpick:
config_overrides["nitpicky"] = True
# If we only wan config (e.g. for printing/validation), stop here
if get_config_only:
return (path_config, PATH_SRC_FOLDER, config_overrides)
# print information about the build
click.echo(
click.style("Source Folder: ", bold=True, fg="blue")
+ click.format_filename(f"{PATH_SRC_FOLDER}")
)
click.echo(
click.style("Config Path: ", bold=True, fg="blue")
+ click.format_filename(f"{path_config}")
)
click.echo(
click.style("Output Path: ", bold=True, fg="blue")
+ click.format_filename(f"{OUTPUT_PATH}")
)
# Now call the Sphinx commands to build
result = build_sphinx(
PATH_SRC_FOLDER,
OUTPUT_PATH,
toc,
noconfig=True,
path_config=path_config,
confoverrides=config_overrides,
builder=BUILDER_OPTS[builder],
warningiserror=warningiserror,
keep_going=keep_going,
freshenv=freshenv,
verbosity=verbose,
quiet=quiet > 0,
really_quiet=quiet > 1,
)
builder_specific_actions(
result, builder, OUTPUT_PATH, build_type, PAGE_NAME, click.echo
)
|
def build(
path_source,
path_output,
config,
toc,
warningiserror,
nitpick,
keep_going,
freshenv,
builder,
custom_builder,
verbose,
quiet,
individualpages,
get_config_only=False,
):
"""Convert your book's or page's content to HTML or a PDF."""
from .. import __version__ as jbv
from ..sphinx import build_sphinx
if not get_config_only:
click.secho(f"Running Jupyter-Book v{jbv}", bold=True, fg="green")
# Paths for the notebooks
PATH_SRC_FOLDER = Path(path_source).absolute()
config_overrides = {}
found_config = find_config_path(PATH_SRC_FOLDER)
BUILD_PATH = path_output if path_output is not None else found_config[0]
# Set config for --individualpages option (pages, documents)
if individualpages:
if builder != "pdflatex":
_error(
"""
Specified option --individualpages only works with the
following builders:
pdflatex
"""
)
# Build Page
if not PATH_SRC_FOLDER.is_dir():
# it is a single file
build_type = "page"
subdir = None
PATH_SRC = Path(path_source)
PATH_SRC_FOLDER = PATH_SRC.parent.absolute()
PAGE_NAME = PATH_SRC.with_suffix("").name
# checking if the page is inside a sub directory
# then changing the build_path accordingly
if str(BUILD_PATH) in str(PATH_SRC_FOLDER):
subdir = str(PATH_SRC_FOLDER.relative_to(BUILD_PATH))
if subdir and subdir != ".":
subdir = subdir.replace("/", "-")
subdir = subdir + "-" + PAGE_NAME
BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", subdir)
else:
BUILD_PATH = Path(BUILD_PATH).joinpath("_build", "_page", PAGE_NAME)
# Find all files that *aren't* the page we're building and exclude them
to_exclude = glob(str(PATH_SRC_FOLDER.joinpath("**", "*")), recursive=True)
to_exclude = [
op.relpath(ifile, PATH_SRC_FOLDER)
for ifile in to_exclude
if ifile != str(PATH_SRC.absolute())
]
to_exclude.extend(["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"])
# Now call the Sphinx commands to build
config_overrides = {
"master_doc": PAGE_NAME,
"globaltoc_path": "",
"exclude_patterns": to_exclude,
"html_theme_options": {"single_page": True},
# --individualpages option set to True for page call
"latex_individualpages": True,
}
# Build Project
else:
build_type = "book"
PAGE_NAME = None
BUILD_PATH = Path(BUILD_PATH).joinpath("_build")
# Table of contents
if toc is None:
toc = PATH_SRC_FOLDER.joinpath("_toc.yml")
else:
toc = Path(toc)
if not toc.exists():
_error(
"Couldn't find a Table of Contents file. To auto-generate "
f"one, run\n\n\tjupyter-book toc {path_source}"
)
# Check whether the table of contents has changed. If so we rebuild all
if toc and BUILD_PATH.joinpath(".doctrees").exists():
toc_modified = toc.stat().st_mtime
build_files = BUILD_PATH.rglob(".doctrees/*")
build_modified = max([os.stat(ii).st_mtime for ii in build_files])
# If the toc file has been modified after the build we need to force rebuild
freshenv = toc_modified > build_modified
config_overrides["globaltoc_path"] = toc.as_posix()
# Builder-specific overrides
if builder == "pdfhtml":
config_overrides["html_theme_options"] = {"single_page": True}
# --individualpages option passthrough
config_overrides["latex_individualpages"] = individualpages
# Use the specified configuration file, or one found in the root directory
path_config = config or (
found_config[0].joinpath("_config.yml") if found_config[1] else None
)
if path_config and not Path(path_config).exists():
raise IOError(f"Config file path given, but not found: {path_config}")
if builder in ["html", "pdfhtml", "linkcheck"]:
OUTPUT_PATH = BUILD_PATH.joinpath("html")
elif builder in ["latex", "pdflatex"]:
OUTPUT_PATH = BUILD_PATH.joinpath("latex")
elif builder in ["dirhtml"]:
OUTPUT_PATH = BUILD_PATH.joinpath("dirhtml")
elif builder in ["custom"]:
OUTPUT_PATH = BUILD_PATH.joinpath(custom_builder)
BUILDER_OPTS["custom"] = custom_builder
if nitpick:
config_overrides["nitpicky"] = True
# If we only wan config (e.g. for printing/validation), stop here
if get_config_only:
return (path_config, PATH_SRC_FOLDER, config_overrides)
# print information about the build
click.echo(
click.style("Source Folder: ", bold=True, fg="blue")
+ click.format_filename(f"{PATH_SRC_FOLDER}")
)
click.echo(
click.style("Config Path: ", bold=True, fg="blue")
+ click.format_filename(f"{path_config}")
)
click.echo(
click.style("Output Path: ", bold=True, fg="blue")
+ click.format_filename(f"{OUTPUT_PATH}")
)
# Now call the Sphinx commands to build
result = build_sphinx(
PATH_SRC_FOLDER,
OUTPUT_PATH,
toc,
noconfig=True,
path_config=path_config,
confoverrides=config_overrides,
builder=BUILDER_OPTS[builder],
warningiserror=warningiserror,
keep_going=keep_going,
freshenv=freshenv,
verbosity=verbose,
quiet=quiet > 0,
really_quiet=quiet > 1,
)
builder_specific_actions(
result, builder, OUTPUT_PATH, build_type, PAGE_NAME, click.echo
)
|
https://github.com/executablebooks/jupyter-book/issues/1142
|
(wintest) C:\Users\laaltenburg>jupyter-book build newbook
Running Jupyter-Book v0.8.3
Traceback (most recent call last):
File "c:\users\laaltenburg\.conda\envs\wintest\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\users\laaltenburg\.conda\envs\wintest\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\laaltenburg\.conda\envs\wintest\Scripts\jupyter-book.exe\__main__.py", line 7, in <module>
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "c:\users\laaltenburg\.conda\envs\wintest\lib\site-packages\jupyter_book\commands\__init__.py", line 199, in build
build_modified = max([os.stat(ii).st_mtime for ii in build_files])
ValueError: max() arg is an empty sequence_
|
ValueError
|
def _gen_toctree(options, subsections, parent_suff):
options = "\n".join([f":{key}: {val}" for key, val in options.items()])
# Generate the TOC from our options/pages
toctree_text_md = """
```{{toctree}}
:hidden:
:titlesonly:
{options}
{sections}
```
"""
toctree_text_rst = """
.. toctree::
:hidden:
:titlesonly:
{options}
{sections}
"""
if parent_suff in [".ipynb", ".md"]:
toctree_template = toctree_text_md
elif parent_suff == ".rst":
toctree_template = toctree_text_rst
else:
return ""
# Create the markdown directive for our toctree
toctree = dedent(toctree_template).format(
options=options, sections="\n".join(subsections)
)
return toctree
|
def _gen_toctree(options, subsections, parent_suff):
options = "\n".join([f":{key}: {val}" for key, val in options.items()])
# Generate the TOC from our options/pages
toctree_text_md = """
```{{toctree}}
:hidden:
:titlesonly:
{options}
{sections}
```
"""
toctree_text_rst = """
.. toctree::
:hidden:
:titlesonly:
{options}
{sections}
"""
if parent_suff in [".ipynb", ".md"]:
toctree_template = toctree_text_md
elif parent_suff == ".rst":
toctree_template = toctree_text_rst
# Create the markdown directive for our toctree
toctree = dedent(toctree_template).format(
options=options, sections="\n".join(subsections)
)
return toctree
|
https://github.com/executablebooks/jupyter-book/issues/1104
|
Running Sphinx v3.3.0
making output directory... done
myst v0.12.10: MdParserConfig(renderer='sphinx', commonmark_only=False, dmath_enable=True, dmath_allow_labels=True, dmath_allow_space=True, dmath_allow_digits=True, amsmath_enable=False, deflist_enable=False, update_mathjax=True, admonition_enable=False, figure_enable=False, disable_syntax=[], html_img_enable=False, url_schemes=['mailto', 'http', 'https'], heading_anchors=None)
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 2 source files that are out of date
updating environment: [new config] 2 added, 0 changed, 0 removed
reading sources... [ 50%] notebooks
Extension error:
Handler <function add_toctree at 0x7f7b29674790> for event 'source-read' threw an exception (exception: local variable 'toctree_template' referenced before assignment)
Traceback (most recent call last):
File "/opt/conda/lib/python3.8/site-packages/sphinx/events.py", line 110, in emit
results.append(listener.handler(self.app, *args))
File "/opt/conda/lib/python3.8/site-packages/jupyter_book/toc.py", line 140, in add_toctree
toctrees.append(_gen_toctree(toc_options, toc_sections, parent_suff))
File "/opt/conda/lib/python3.8/site-packages/jupyter_book/toc.py", line 224, in _gen_toctree
toctree = dedent(toctree_template).format(
UnboundLocalError: local variable 'toctree_template' referenced before assignment
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.8/site-packages/jupyter_book/sphinx.py", line 141, in build_sphinx
app.build(force_all, filenames)
File "/opt/conda/lib/python3.8/site-packages/sphinx/application.py", line 352, in build
self.builder.build_update()
File "/opt/conda/lib/python3.8/site-packages/sphinx/builders/__init__.py", line 297, in build_update
self.build(to_build,
File "/opt/conda/lib/python3.8/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/opt/conda/lib/python3.8/site-packages/sphinx/builders/__init__.py", line 418, in read
self._read_serial(docnames)
File "/opt/conda/lib/python3.8/site-packages/sphinx/builders/__init__.py", line 439, in _read_serial
self.read_doc(docname)
File "/opt/conda/lib/python3.8/site-packages/sphinx/builders/__init__.py", line 479, in read_doc
doctree = read_doc(self.app, self.env, self.env.doc2path(docname))
File "/opt/conda/lib/python3.8/site-packages/sphinx/io.py", line 223, in read_doc
pub.publish()
File "/opt/conda/lib/python3.8/site-packages/docutils/core.py", line 216, in publish
self.document = self.reader.read(self.source, self.parser,
File "/opt/conda/lib/python3.8/site-packages/sphinx/io.py", line 127, in read
self.input = self.read_source(settings.env)
File "/opt/conda/lib/python3.8/site-packages/sphinx/io.py", line 137, in read_source
env.events.emit('source-read', env.docname, arg)
File "/opt/conda/lib/python3.8/site-packages/sphinx/events.py", line 117, in emit
raise ExtensionError(__("Handler %r for event %r threw an exception") %
sphinx.errors.ExtensionError: Handler <function add_toctree at 0x7f7b29674790> for event 'source-read' threw an exception (exception: local variable 'toctree_template' referenced before assignment)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/bin/jupyter-book", line 8, in <module>
sys.exit(main())
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/jupyter_book/commands/__init__.py", line 259, in build
builder_specific_actions(
File "/opt/conda/lib/python3.8/site-packages/jupyter_book/commands/__init__.py", line 500, in builder_specific_actions
raise RuntimeError(_message_box(msg, color="red", doprint=False)) from result
RuntimeError:
===============================================================================
There was an error in building your book. Look above for the cause.
===============================================================================
|
UnboundLocalError
|
def _get_step(self, axis):
# TODO: need to check if this is working fine, particularly with
"""Use to determine the size of the widget with support for non
uniform axis.
"""
if axis.index >= axis.size - 1:
return axis.index2value(axis.index) - axis.index2value(axis.index - 1)
else:
return axis.index2value(axis.index + 1) - axis.index2value(axis.index)
|
def _get_step(self, axis):
# TODO: need to check if this is working fine, particularly with
"""Use to determine the size of the widget with support for non
uniform axis.
"""
return axis.index2value(axis.index + 1) - axis.index2value(axis.index)
|
https://github.com/hyperspy/hyperspy/issues/2452
|
mod.plot()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-97bc6e2748ea> in <module>
----> 1 mod.plot()
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\models\model1d.py in plot(self, plot_components, **kwargs)
658
659 # If new coordinates are assigned
--> 660 self.signal.plot(**kwargs)
661 _plot = self.signal._plot
662 l1 = _plot.signal_plot.ax_lines[0]
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\signal.py in plot(self, navigator, axes_manager, plot_markers, **kwargs)
2159 " \"slider\", None, a Signal instance")
2160
-> 2161 self._plot.plot(**kwargs)
2162 self.events.data_changed.connect(self.update_plot, [])
2163 if self._plot.signal_plot:
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\mpl_he.py in plot(self, **kwargs)
174 pointer = self.assign_pointer()
175 if pointer is not None:
--> 176 self.pointer = pointer(self.axes_manager)
177 self.pointer.color = 'red'
178 self.pointer.connect_navigate()
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\_widgets\rectangles.py in __init__(self, axes_manager, **kwargs)
39
40 def __init__(self, axes_manager, **kwargs):
---> 41 super(SquareWidget, self).__init__(axes_manager, **kwargs)
42
43 def _set_patch(self):
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in __init__(self, axes_manager, **kwargs)
768
769 def __init__(self, axes_manager, **kwargs):
--> 770 super(Widget2DBase, self).__init__(axes_manager, **kwargs)
771 self.border_thickness = 2
772
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in __init__(self, axes_manager, **kwargs)
546 def __init__(self, axes_manager, **kwargs):
547 super(ResizableDraggableWidgetBase, self).__init__(
--> 548 axes_manager, **kwargs)
549 if not self.axes:
550 self._size = np.array([1])
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in __init__(self, axes_manager, **kwargs)
337 if self.axes_manager is not None:
338 if self.axes_manager.navigation_dimension > 0:
--> 339 self.axes = self.axes_manager.navigation_axes[0:1]
340 else:
341 self.axes = self.axes_manager.signal_axes[0:1]
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in <lambda>(s, v)
97
98 axes = property(lambda s: s._get_axes(),
---> 99 lambda s, v: s._set_axes(v))
100
101 def is_on(self):
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in _set_axes(self, axes)
570 super(ResizableDraggableWidgetBase, self)._set_axes(axes)
571 if self.axes:
--> 572 self._size = np.array([self._get_step(ax) for ax in self.axes])
573
574 def _get_step(self, axis):
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in <listcomp>(.0)
570 super(ResizableDraggableWidgetBase, self)._set_axes(axes)
571 if self.axes:
--> 572 self._size = np.array([self._get_step(ax) for ax in self.axes])
573
574 def _get_step(self, axis):
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\drawing\widget.py in _get_step(self, axis)
577 uniform axis.
578 """
--> 579 return axis.index2value(axis.index + 1) - axis.index2value(axis.index)
580
581 def _get_size(self):
c:\users\nicolastappy\documents\git\hyperspy\hyperspy\axes.py in index2value(self, index)
503 return self.axis[index.ravel()].reshape(index.shape)
504 else:
--> 505 return self.axis[index]
506
507 def value2index(self, value, rounding=round):
IndexError: index 4 is out of bounds for axis 0 with size 4
|
IndexError
|
def load_1D_EDS_SEM_spectrum():
"""
Load an EDS-SEM spectrum
Notes
-----
- Sample: EDS-TM002 provided by BAM (www.webshop.bam.de)
- SEM Microscope: Nvision40 Carl Zeiss
- EDS Detector: X-max 80 from Oxford Instrument
"""
from hyperspy.io import load
file_path = os.sep.join(
[
os.path.dirname(__file__),
"eds",
"example_signals",
"1D_EDS_SEM_Spectrum.hspy",
]
)
return load(file_path)
|
def load_1D_EDS_SEM_spectrum():
"""
Load an EDS-SEM spectrum
Notes
-----
- Sample: EDS-TM002 provided by BAM (www.webshop.bam.de)
- SEM Microscope: Nvision40 Carl Zeiss
- EDS Detector: X-max 80 from Oxford Instrument
"""
from hyperspy.io import load
file_path = os.sep.join(
[
os.path.dirname(__file__),
"eds",
"example_signals",
"1D_EDS_SEM_Spectrum.hdf5",
]
)
return load(file_path)
|
https://github.com/hyperspy/hyperspy/issues/2429
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, chunks, **kwds)
570 # contains the chunk shape guessed by h5py
--> 571 dset = group.require_dataset(key, **these_kwds)
572 got_data = True
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in require_dataset(self, name, shape, dtype, exact, **kwds)
190 if not name in self:
--> 191 return self.create_dataset(name, *(shape, dtype), **kwds)
192
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in create_dataset(self, name, shape, dtype, data, **kwds)
135 with phil:
--> 136 dsid = dataset.make_new_dset(self, shape, dtype, data, **kwds)
137 dset = dataset.Dataset(dsid)
~\.conda\envs\py5\lib\site-packages\h5py\_hl\dataset.py in make_new_dset(parent, shape, dtype, data, chunks, compression, shuffle, fletcher32, maxshape, compression_opts, fillvalue, scaleoffset, track_times, external, track_order, dcpl)
117 dtype = numpy.dtype(dtype)
--> 118 tid = h5t.py_create(dtype, logical=1)
119
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
TypeError: No conversion path for dtype: dtype('<U2')
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-7-56c3607e5bfd> in <module>
1 s = hs.datasets.example_signals.EDS_TEM_Spectrum()
----> 2 s.save('eds.hspy')
c:\users\thomasaar\documents\github\hyperspy\hyperspy\signal.py in save(self, filename, overwrite, extension, **kwds)
2229 basename, ext = os.path.splitext(filename)
2230 filename = basename + '.' + extension
-> 2231 io.save(filename, self, overwrite=overwrite, **kwds)
2232
2233 def _replot(self):
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io.py in save(filename, signal, overwrite, **kwds)
602 "False.")
603 if write:
--> 604 writer.file_writer(filename, signal, **kwds)
605 _logger.info('The %s file was created' % filename)
606 folder, filename = os.path.split(os.path.abspath(filename))
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in file_writer(filename, signal, *args, **kwds)
753 smd.record_by = ""
754 try:
--> 755 write_signal(signal, expg, **kwds)
756 except BaseException:
757 raise
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in write_signal(signal, group, **kwds)
708 metadata_dict["_internal_parameters"] = \
709 metadata_dict.pop("_HyperSpy")
--> 710 dict2hdfgroup(metadata_dict, mapped_par, **kwds)
711 original_par = group.create_group(original_metadata)
712 dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
441 for key, value in dictionary.items():
442 if isinstance(value, dict):
--> 443 dict2hdfgroup(value, group.create_group(key),
444 **kwds)
445 elif isinstance(value, DictionaryTreeBrowser):
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
451 write_signal(value, group.require_group(kn))
452 elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):
--> 453 overwrite_dataset(group, value, key, **kwds)
454 elif value is None:
455 group.attrs[key] = '_None_'
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, chunks, **kwds)
574 # if the shape or dtype/etc do not match,
575 # we delete the old one and create new in the next loop run
--> 576 del group[key]
577 if dset == data:
578 # just a reference to already created thing
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in __delitem__(self, name)
397 def __delitem__(self, name):
398 """ Delete (unlink) an item from this group. """
--> 399 self.id.unlink(self._e(name))
400
401 @with_phil
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\h5g.pyx in h5py.h5g.GroupID.unlink()
KeyError: "Couldn't delete link (callback link pointer is NULL (specified link may be '.' or not exist))"```
|
TypeError
|
def load_1D_EDS_TEM_spectrum():
"""
Load an EDS-TEM spectrum
Notes
-----
- Sample: FePt bimetallic nanoparticles
- SEM Microscope: Tecnai Osiris 200 kV D658 AnalyticalTwin
- EDS Detector: Super-X 4 detectors Brucker
"""
from hyperspy.io import load
file_path = os.sep.join(
[
os.path.dirname(__file__),
"eds",
"example_signals",
"1D_EDS_TEM_Spectrum.hspy",
]
)
return load(file_path)
|
def load_1D_EDS_TEM_spectrum():
"""
Load an EDS-TEM spectrum
Notes
-----
- Sample: FePt bimetallic nanoparticles
- SEM Microscope: Tecnai Osiris 200 kV D658 AnalyticalTwin
- EDS Detector: Super-X 4 detectors Brucker
"""
from hyperspy.io import load
file_path = os.sep.join(
[
os.path.dirname(__file__),
"eds",
"example_signals",
"1D_EDS_TEM_Spectrum.hdf5",
]
)
return load(file_path)
|
https://github.com/hyperspy/hyperspy/issues/2429
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, chunks, **kwds)
570 # contains the chunk shape guessed by h5py
--> 571 dset = group.require_dataset(key, **these_kwds)
572 got_data = True
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in require_dataset(self, name, shape, dtype, exact, **kwds)
190 if not name in self:
--> 191 return self.create_dataset(name, *(shape, dtype), **kwds)
192
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in create_dataset(self, name, shape, dtype, data, **kwds)
135 with phil:
--> 136 dsid = dataset.make_new_dset(self, shape, dtype, data, **kwds)
137 dset = dataset.Dataset(dsid)
~\.conda\envs\py5\lib\site-packages\h5py\_hl\dataset.py in make_new_dset(parent, shape, dtype, data, chunks, compression, shuffle, fletcher32, maxshape, compression_opts, fillvalue, scaleoffset, track_times, external, track_order, dcpl)
117 dtype = numpy.dtype(dtype)
--> 118 tid = h5t.py_create(dtype, logical=1)
119
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
TypeError: No conversion path for dtype: dtype('<U2')
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-7-56c3607e5bfd> in <module>
1 s = hs.datasets.example_signals.EDS_TEM_Spectrum()
----> 2 s.save('eds.hspy')
c:\users\thomasaar\documents\github\hyperspy\hyperspy\signal.py in save(self, filename, overwrite, extension, **kwds)
2229 basename, ext = os.path.splitext(filename)
2230 filename = basename + '.' + extension
-> 2231 io.save(filename, self, overwrite=overwrite, **kwds)
2232
2233 def _replot(self):
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io.py in save(filename, signal, overwrite, **kwds)
602 "False.")
603 if write:
--> 604 writer.file_writer(filename, signal, **kwds)
605 _logger.info('The %s file was created' % filename)
606 folder, filename = os.path.split(os.path.abspath(filename))
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in file_writer(filename, signal, *args, **kwds)
753 smd.record_by = ""
754 try:
--> 755 write_signal(signal, expg, **kwds)
756 except BaseException:
757 raise
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in write_signal(signal, group, **kwds)
708 metadata_dict["_internal_parameters"] = \
709 metadata_dict.pop("_HyperSpy")
--> 710 dict2hdfgroup(metadata_dict, mapped_par, **kwds)
711 original_par = group.create_group(original_metadata)
712 dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
441 for key, value in dictionary.items():
442 if isinstance(value, dict):
--> 443 dict2hdfgroup(value, group.create_group(key),
444 **kwds)
445 elif isinstance(value, DictionaryTreeBrowser):
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
451 write_signal(value, group.require_group(kn))
452 elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):
--> 453 overwrite_dataset(group, value, key, **kwds)
454 elif value is None:
455 group.attrs[key] = '_None_'
c:\users\thomasaar\documents\github\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, chunks, **kwds)
574 # if the shape or dtype/etc do not match,
575 # we delete the old one and create new in the next loop run
--> 576 del group[key]
577 if dset == data:
578 # just a reference to already created thing
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
~\.conda\envs\py5\lib\site-packages\h5py\_hl\group.py in __delitem__(self, name)
397 def __delitem__(self, name):
398 """ Delete (unlink) an item from this group. """
--> 399 self.id.unlink(self._e(name))
400
401 @with_phil
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\h5g.pyx in h5py.h5g.GroupID.unlink()
KeyError: "Couldn't delete link (callback link pointer is NULL (specified link may be '.' or not exist))"```
|
TypeError
|
def get_lines_intensity(
self,
xray_lines=None,
integration_windows=2.0,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs,
):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
suming the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
Backgrounds average in provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, Iterable* of strings}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
* Note that while dictionaries and strings are iterable,
their use is ambiguous and specifically not allowed.
integration_windows: Float or array
If float, the width of the integration windows is the
'integration_windows_width' times the calculated FWHM of the line.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
If None, no background subtraction. Else, the backgrounds average
in the windows are subtracted from the return intensities.
'background_windows' provides the position of the windows in
energy. Each line corresponds to a X-ray line. In a line, the two
first values correspond to the limits of the left window and the
two last values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
if xray_lines is not None and (
not isinstance(xray_lines, Iterable) or isinstance(xray_lines, (str, dict))
):
raise TypeError(
"xray_lines must be a compatible iterable, but was "
"mistakenly provided as a %s." % type(xray_lines)
)
xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
if hasattr(integration_windows, "__iter__") is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines
)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):
element, line = utils_eds._get_element_and_line(Xray_line)
line_energy = self._get_line_energy(Xray_line)
img = self.isig[window[0] : window[1]].integrate1D(-1)
if np.issubdtype(img.data.dtype, np.integer):
# The operations below require a float dtype with the default
# numpy casting rule ('same_kind')
img.change_dtype("float")
if background_windows is not None:
bw = background_windows[i]
# TODO: test to prevent slicing bug. To be reomved when fixed
indexes = [float(ax.value2index(de)) for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2])
)
img = img - (bck1 + bck2) * corr_factor
img.metadata.General.title = "X-ray line intensity of %s: %s at %.2f %s" % (
self.metadata.General.title,
Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
)
img.axes_manager.set_signal_dimension(0)
if plot_result and img.axes_manager.navigation_size == 1:
print(
"%s at %s %s : Intensity = %.2f"
% (Xray_line, line_energy, ax.units, img.data)
)
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.navigation_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
|
def get_lines_intensity(
self,
xray_lines=None,
integration_windows=2.0,
background_windows=None,
plot_result=False,
only_one=True,
only_lines=("a",),
**kwargs,
):
"""Return the intensity map of selected Xray lines.
The intensities, the number of X-ray counts, are computed by
suming the spectrum over the
different X-ray lines. The sum window width
is calculated from the energy resolution of the detector
as defined in 'energy_resolution_MnKa' of the metadata.
Backgrounds average in provided windows can be subtracted from the
intensities.
Parameters
----------
xray_lines: {None, list of string}
If None,
if `metadata.Sample.elements.xray_lines` contains a
list of lines use those.
If `metadata.Sample.elements.xray_lines` is undefined
or empty but `metadata.Sample.elements` is defined,
use the same syntax as `add_line` to select a subset of lines
for the operation.
Alternatively, provide an iterable containing
a list of valid X-ray lines symbols.
integration_windows: Float or array
If float, the width of the integration windows is the
'integration_windows_width' times the calculated FWHM of the line.
Else provide an array for which each row corresponds to a X-ray
line. Each row contains the left and right value of the window.
background_windows: None or 2D array of float
If None, no background subtraction. Else, the backgrounds average
in the windows are subtracted from the return intensities.
'background_windows' provides the position of the windows in
energy. Each line corresponds to a X-ray line. In a line, the two
first values correspond to the limits of the left window and the
two last values correspond to the limits of the right window.
plot_result : bool
If True, plot the calculated line intensities. If the current
object is a single spectrum it prints the result instead.
only_one : bool
If False, use all the lines of each element in the data spectral
range. If True use only the line at the highest energy
above an overvoltage of 2 (< beam energy / 2).
only_lines : {None, list of strings}
If not None, use only the given lines.
kwargs
The extra keyword arguments for plotting. See
`utils.plot.plot_signals`
Returns
-------
intensities : list
A list containing the intensities as BaseSignal subclasses.
Examples
--------
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.get_lines_intensity(['Mn_Ka'], plot_result=True)
Mn_La at 0.63316 keV : Intensity = 96700.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.plot(['Mn_Ka'], integration_windows=2.1)
>>> s.get_lines_intensity(['Mn_Ka'],
>>> integration_windows=2.1, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 53597.00
>>> s = hs.datasets.example_signals.EDS_SEM_Spectrum()
>>> s.set_elements(['Mn'])
>>> s.set_lines(['Mn_Ka'])
>>> bw = s.estimate_background_windows()
>>> s.plot(background_windows=bw)
>>> s.get_lines_intensity(background_windows=bw, plot_result=True)
Mn_Ka at 5.8987 keV : Intensity = 46716.00
See also
--------
set_elements, add_elements, estimate_background_windows,
plot
"""
xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
if hasattr(integration_windows, "__iter__") is False:
integration_windows = self.estimate_integration_windows(
windows_width=integration_windows, xray_lines=xray_lines
)
intensities = []
ax = self.axes_manager.signal_axes[0]
# test Signal1D (0D problem)
# signal_to_index = self.axes_manager.navigation_dimension - 2
for i, (Xray_line, window) in enumerate(zip(xray_lines, integration_windows)):
element, line = utils_eds._get_element_and_line(Xray_line)
line_energy = self._get_line_energy(Xray_line)
img = self.isig[window[0] : window[1]].integrate1D(-1)
if np.issubdtype(img.data.dtype, np.integer):
# The operations below require a float dtype with the default
# numpy casting rule ('same_kind')
img.change_dtype("float")
if background_windows is not None:
bw = background_windows[i]
# TODO: test to prevent slicing bug. To be reomved when fixed
indexes = [float(ax.value2index(de)) for de in list(bw) + window]
if indexes[0] == indexes[1]:
bck1 = self.isig[bw[0]]
else:
bck1 = self.isig[bw[0] : bw[1]].integrate1D(-1)
if indexes[2] == indexes[3]:
bck2 = self.isig[bw[2]]
else:
bck2 = self.isig[bw[2] : bw[3]].integrate1D(-1)
corr_factor = (indexes[5] - indexes[4]) / (
(indexes[1] - indexes[0]) + (indexes[3] - indexes[2])
)
img = img - (bck1 + bck2) * corr_factor
img.metadata.General.title = "X-ray line intensity of %s: %s at %.2f %s" % (
self.metadata.General.title,
Xray_line,
line_energy,
self.axes_manager.signal_axes[0].units,
)
img.axes_manager.set_signal_dimension(0)
if plot_result and img.axes_manager.navigation_size == 1:
print(
"%s at %s %s : Intensity = %.2f"
% (Xray_line, line_energy, ax.units, img.data)
)
img.metadata.set_item("Sample.elements", ([element]))
img.metadata.set_item("Sample.xray_lines", ([Xray_line]))
intensities.append(img)
if plot_result and img.axes_manager.navigation_size != 1:
utils.plot.plot_signals(intensities, **kwargs)
return intensities
|
https://github.com/hyperspy/hyperspy/issues/2448
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-14-9a0d450cf112> in <module>
1 s = hs.signals.Signal1D(np.random.random((10,100)))
2 s.set_signal_type("EDS_SEM")
----> 3 s.get_lines_intensity("Ca_Kb") # should raise KeyError
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in get_lines_intensity(self, xray_lines, integration_windows, background_windows, plot_result, only_one, only_lines, **kwargs)
618 """
619
--> 620 xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
621 if hasattr(integration_windows, '__iter__') is False:
622 integration_windows = self.estimate_integration_windows(
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _parse_xray_lines(self, xray_lines, only_one, only_lines)
523 only_lines=only_lines)
524 xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
--> 525 xray_lines)
526 for xray in xray_not_here:
527 warnings.warn("%s is not in the data energy range." % xray +
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _get_xray_lines_in_spectral_range(self, xray_lines)
156 xray_lines_not_in_range = []
157 for xray_line in xray_lines:
--> 158 line_energy = self._get_line_energy(xray_line)
159 if low_value < line_energy < high_value:
160 xray_lines_in_range.append(xray_line)
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _get_line_energy(self, Xray_line, FWHM_MnKa)
87 "`set_signal_type(\"EDS_SEM\")` to convert to one of these"
88 "signal types.")
---> 89 line_energy = utils_eds._get_energy_xray_line(Xray_line)
90 if units_name == 'eV':
91 line_energy *= 1000
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/misc/eds/utils.py in _get_energy_xray_line(xray_line)
30 By example, if xray_line = 'Mn_Ka' this function returns 5.8987
31 """
---> 32 element, line = _get_element_and_line(xray_line)
33 return elements_db[element]['Atomic_properties']['Xray_lines'][
34 line]['energy (keV)']
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/misc/eds/utils.py in _get_element_and_line(xray_line)
20 lim = xray_line.find('_')
21 if lim == -1:
---> 22 raise ValueError("Invalid xray-line: %" % xray_line)
23 return xray_line[:lim], xray_line[lim + 1:]
24
ValueError: incomplete format
|
ValueError
|
def _unpack_data(self, file, encoding="latin-1"):
"""This needs to be special because it reads until the end of
file. This causes an error in the series of data"""
# Size of datapoints in bytes. Always int16 (==2) or 32 (==4)
Psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8)
dtype = np.int16 if Psize == 2 else np.int32
if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED":
# If the points are not compressed we need to read the exact
# size occupied by datapoints
# Datapoints in X and Y dimensions
Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts")
# Datasize in WL
Wsize = self._get_work_dict_key_value("_14_W_Size")
# We need to take into account the fact that Wsize is often
# set to 0 instead of 1 in non-spectral data to compute the
# space occupied by data in the file
readsize = Npts_tot * Psize
if Wsize != 0:
readsize *= Wsize
# if Npts_channel is not 0:
# readsize*=Npts_channel
# Read the exact size of the data
_points = np.frombuffer(file.read(readsize), dtype=dtype)
# _points = np.fromstring(file.read(readsize),dtype=dtype)
else:
# If the points are compressed do the uncompress magic. There
# the space occupied by datapoints is self-taken care of.
# Number of streams
_directoryCount = self._get_uint32(file)
# empty lists to store the read sizes
rawLengthData = []
zipLengthData = []
for i in range(_directoryCount):
# Size of raw and compressed data sizes in each stream
rawLengthData.append(self._get_uint32(file))
zipLengthData.append(self._get_uint32(file))
# We now initialize an empty binary string to store the results
rawData = b""
for i in range(_directoryCount):
# And for each stream we uncompress using zip lib
# and add it to raw string
rawData += zlib.decompress(file.read(zipLengthData[i]))
# Finally numpy converts it to a numeric object
_points = np.frombuffer(rawData, dtype=dtype)
# _points = np.fromstring(rawData, dtype=dtype)
# rescale data
# We set non measured points to nan according to .sur ways
nm = []
if self._get_work_dict_key_value("_11_Special_Points") == 1:
# has unmeasured points
nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2
# We set the point in the numeric scale
_points = _points.astype(np.float) * self._get_work_dict_key_value(
"_23_Z_Spacing"
) * self._get_work_dict_key_value(
"_35_Z_Unit_Ratio"
) + self._get_work_dict_key_value("_55_Z_Offset")
_points[nm] = np.nan
# Return the points, rescaled
return _points
|
def _unpack_data(self, file, encoding="latin-1"):
"""This needs to be special because it reads until the end of
file. This causes an error in the series of data"""
# Size of datapoints in bytes. Always int16 (==2) or 32 (==4)
Psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8)
dtype = np.int16 if Psize == 2 else np.int32
if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED":
# If the points are not compressed we need to read the exact
# size occupied by datapoints
# Datapoints in X and Y dimensions
Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts")
# Datasize in WL
Wsize = self._get_work_dict_key_value("_14_W_Size")
# We need to take into account the fact that Wsize is often
# set to 0 instead of 1 in non-spectral data to compute the
# space occupied by data in the file
readsize = Npts_tot * Psize
if Wsize is not 0:
readsize *= Wsize
# if Npts_channel is not 0:
# readsize*=Npts_channel
# Read the exact size of the data
_points = np.frombuffer(file.read(readsize), dtype=dtype)
# _points = np.fromstring(file.read(readsize),dtype=dtype)
else:
# If the points are compressed do the uncompress magic. There
# the space occupied by datapoints is self-taken care of.
# Number of streams
_directoryCount = self._get_uint32(file)
# empty lists to store the read sizes
rawLengthData = []
zipLengthData = []
for i in range(_directoryCount):
# Size of raw and compressed data sizes in each stream
rawLengthData.append(self._get_uint32(file))
zipLengthData.append(self._get_uint32(file))
# We now initialize an empty binary string to store the results
rawData = b""
for i in range(_directoryCount):
# And for each stream we uncompress using zip lib
# and add it to raw string
rawData += zlib.decompress(file.read(zipLengthData[i]))
# Finally numpy converts it to a numeric object
_points = np.frombuffer(rawData, dtype=dtype)
# _points = np.fromstring(rawData, dtype=dtype)
# rescale data
# We set non measured points to nan according to .sur ways
nm = []
if self._get_work_dict_key_value("_11_Special_Points") == 1:
# has unmeasured points
nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2
# We set the point in the numeric scale
_points = _points.astype(np.float) * self._get_work_dict_key_value(
"_23_Z_Spacing"
) * self._get_work_dict_key_value(
"_35_Z_Unit_Ratio"
) + self._get_work_dict_key_value("_55_Z_Offset")
_points[nm] = np.nan
# Return the points, rescaled
return _points
|
https://github.com/hyperspy/hyperspy/issues/2448
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-14-9a0d450cf112> in <module>
1 s = hs.signals.Signal1D(np.random.random((10,100)))
2 s.set_signal_type("EDS_SEM")
----> 3 s.get_lines_intensity("Ca_Kb") # should raise KeyError
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in get_lines_intensity(self, xray_lines, integration_windows, background_windows, plot_result, only_one, only_lines, **kwargs)
618 """
619
--> 620 xray_lines = self._parse_xray_lines(xray_lines, only_one, only_lines)
621 if hasattr(integration_windows, '__iter__') is False:
622 integration_windows = self.estimate_integration_windows(
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _parse_xray_lines(self, xray_lines, only_one, only_lines)
523 only_lines=only_lines)
524 xray_lines, xray_not_here = self._get_xray_lines_in_spectral_range(
--> 525 xray_lines)
526 for xray in xray_not_here:
527 warnings.warn("%s is not in the data energy range." % xray +
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _get_xray_lines_in_spectral_range(self, xray_lines)
156 xray_lines_not_in_range = []
157 for xray_line in xray_lines:
--> 158 line_energy = self._get_line_energy(xray_line)
159 if low_value < line_energy < high_value:
160 xray_lines_in_range.append(xray_line)
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/_signals/eds.py in _get_line_energy(self, Xray_line, FWHM_MnKa)
87 "`set_signal_type(\"EDS_SEM\")` to convert to one of these"
88 "signal types.")
---> 89 line_energy = utils_eds._get_energy_xray_line(Xray_line)
90 if units_name == 'eV':
91 line_energy *= 1000
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/misc/eds/utils.py in _get_energy_xray_line(xray_line)
30 By example, if xray_line = 'Mn_Ka' this function returns 5.8987
31 """
---> 32 element, line = _get_element_and_line(xray_line)
33 return elements_db[element]['Atomic_properties']['Xray_lines'][
34 line]['energy (keV)']
~/.local/anaconda3/lib/python3.7/site-packages/hyperspy/misc/eds/utils.py in _get_element_and_line(xray_line)
20 lim = xray_line.find('_')
21 if lim == -1:
---> 22 raise ValueError("Invalid xray-line: %" % xray_line)
23 return xray_line[:lim], xray_line[lim + 1:]
24
ValueError: incomplete format
|
ValueError
|
def plot_decomposition_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
cmap=plt.cm.gray,
per_row=3,
title=None,
):
"""Plot factors from a decomposition. In case of 1D signal axis, each
factors line can be toggled on and off by clicking on their
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`. Otherwise it
raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_decomposition_loadings, plot_decomposition_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError(
"This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if self.learning_results.factors is None:
raise RuntimeError(
"No learning results found. A 'decomposition' needs to be performed first."
)
if same_window is None:
same_window = True
factors = self.learning_results.factors
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument"
)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("Decomposition factors of", same_window)
return self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
cmap=cmap,
per_row=per_row,
)
|
def plot_decomposition_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
cmap=plt.cm.gray,
per_row=3,
title=None,
):
"""Plot factors from a decomposition. In case of 1D signal axis, each
factors line can be toggled on and off by clicking on their
corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`. Otherwise it
raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_decomposition_loadings, plot_decomposition_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError(
"This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if same_window is None:
same_window = True
factors = self.learning_results.factors
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument"
)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("Decomposition factors of", same_window)
return self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
cmap=cmap,
per_row=per_row,
)
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def plot_bss_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
per_row=3,
title=None,
):
"""Plot factors from blind source separation results. In case of 1D
signal axis, each factors line can be toggled on and off by clicking
on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_bss_loadings, plot_bss_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError(
"This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if self.learning_results.bss_factors is None:
raise RuntimeError(
"No learning results found. A "
"'blind_source_separation' needs to be "
"performed first."
)
if same_window is None:
same_window = True
factors = self.learning_results.bss_factors
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("BSS factors of", same_window)
return self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
per_row=per_row,
)
|
def plot_bss_factors(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
per_row=3,
title=None,
):
"""Plot factors from blind source separation results. In case of 1D
signal axis, each factors line can be toggled on and off by clicking
on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
If ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
See also
--------
plot_bss_loadings, plot_bss_results
"""
if self.axes_manager.signal_dimension > 2:
raise NotImplementedError(
"This method cannot plot factors of "
"signals of dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if same_window is None:
same_window = True
factors = self.learning_results.bss_factors
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("BSS factors of", same_window)
return self._plot_factors_or_pchars(
factors,
comp_ids=comp_ids,
calibrate=calibrate,
same_window=same_window,
comp_label=title,
per_row=per_row,
)
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def plot_decomposition_loadings(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor="all",
title=None,
):
"""Plot loadings from a decomposition. In case of 1D navigation axis,
each loading line can be toggled on and off by clicking on the legended
line.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`.
Otherwise it raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
with_factors : bool
If ``True``, also returns figure(s) with the factors for the
given comp_ids.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image; default is
``'all'``
If ``'all'``, both ticks and axis labels will be shown.
If ``'ticks'``, no axis labels will be shown, but ticks/labels will.
If ``'off'``, all decorations and frame will be disabled.
If ``None``, no axis decorations will be shown, but ticks/frame
will.
See also
--------
plot_decomposition_factors, plot_decomposition_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError(
"This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if self.learning_results.loadings is None:
raise RuntimeError(
"No learning results found. A 'decomposition' needs to be performed first."
)
if same_window is None:
same_window = True
loadings = self.learning_results.loadings.T
if with_factors:
factors = self.learning_results.factors
else:
factors = None
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument"
)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("Decomposition loadings of", same_window)
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor,
)
|
def plot_decomposition_loadings(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor="all",
title=None,
):
"""Plot loadings from a decomposition. In case of 1D navigation axis,
each loading line can be toggled on and off by clicking on the legended
line.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned if the `output_dimension` was defined when executing
:py:meth:`~hyperspy.learn.mva.MVA.decomposition`.
Otherwise it raises a :py:exc:`ValueError`.
If `comp_ids` is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
if ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
title : str
Title of the plot.
with_factors : bool
If ``True``, also returns figure(s) with the factors for the
given comp_ids.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image; default is
``'all'``
If ``'all'``, both ticks and axis labels will be shown.
If ``'ticks'``, no axis labels will be shown, but ticks/labels will.
If ``'off'``, all decorations and frame will be disabled.
If ``None``, no axis decorations will be shown, but ticks/frame
will.
See also
--------
plot_decomposition_factors, plot_decomposition_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError(
"This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_decomposition_results` instead."
)
if same_window is None:
same_window = True
loadings = self.learning_results.loadings.T
if with_factors:
factors = self.learning_results.factors
else:
factors = None
if comp_ids is None:
if self.learning_results.output_dimension:
comp_ids = self.learning_results.output_dimension
else:
raise ValueError(
"Please provide the number of components to plot via the "
"`comp_ids` argument"
)
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("Decomposition loadings of", same_window)
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor,
)
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def plot_bss_loadings(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor="all",
title=None,
):
"""Plot loadings from blind source separation results. In case of 1D
navigation axis, each loading line can be toggled on and off by
clicking on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
with_factors : bool
If `True`, also returns figure(s) with the factors for the
given `comp_ids`.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image;
default is ``'all'``
If ``'all'``, both ticks and axis labels will be shown
If ``'ticks'``, no axis labels will be shown, but ticks/labels will
If ``'off'``, all decorations and frame will be disabled
If ``None``, no axis decorations will be shown, but ticks/frame will
See also
--------
plot_bss_factors, plot_bss_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError(
"This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_bss_results` instead."
)
if self.learning_results.bss_loadings is None:
raise RuntimeError(
"No learning results found. A "
"'blind_source_separation' needs to be "
"performed first."
)
if same_window is None:
same_window = True
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("BSS loadings of", same_window)
loadings = self.learning_results.bss_loadings.T
if with_factors:
factors = self.learning_results.bss_factors
else:
factors = None
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor,
)
|
def plot_bss_loadings(
self,
comp_ids=None,
calibrate=True,
same_window=True,
comp_label=None,
with_factors=False,
cmap=plt.cm.gray,
no_nans=False,
per_row=3,
axes_decor="all",
title=None,
):
"""Plot loadings from blind source separation results. In case of 1D
navigation axis, each loading line can be toggled on and off by
clicking on their corresponding line in the legend.
Parameters
----------
comp_ids : None, int, or list (of ints)
If `comp_ids` is ``None``, maps of all components will be
returned. If it is an int, maps of components with ids from 0 to
the given value will be returned. If `comp_ids` is a list of
ints, maps of components with ids contained in the list will be
returned.
calibrate : bool
if ``True``, calibrates plots where calibration is available
from the axes_manager. If ``False``, plots are in pixels/channels.
same_window : bool
If ``True``, plots each factor to the same window. They are
not scaled. Default is ``True``.
comp_label : str
Will be deprecated in 2.0, please use `title` instead
title : str
Title of the plot.
with_factors : bool
If `True`, also returns figure(s) with the factors for the
given `comp_ids`.
cmap : :py:class:`~matplotlib.colors.Colormap`
The colormap used for the factor image, or for peak
characteristics, the colormap used for the scatter plot of
some peak characteristic.
no_nans : bool
If ``True``, removes ``NaN``'s from the loading plots.
per_row : int
The number of plots in each row, when the `same_window`
parameter is ``True``.
axes_decor : str or None, optional
One of: ``'all'``, ``'ticks'``, ``'off'``, or ``None``
Controls how the axes are displayed on each image;
default is ``'all'``
If ``'all'``, both ticks and axis labels will be shown
If ``'ticks'``, no axis labels will be shown, but ticks/labels will
If ``'off'``, all decorations and frame will be disabled
If ``None``, no axis decorations will be shown, but ticks/frame will
See also
--------
plot_bss_factors, plot_bss_results
"""
if self.axes_manager.navigation_dimension > 2:
raise NotImplementedError(
"This method cannot plot loadings of "
"dimension higher than 2."
"You can use "
"`plot_bss_results` instead."
)
if same_window is None:
same_window = True
title = _change_API_comp_label(title, comp_label)
if title is None:
title = self._get_plot_title("BSS loadings of", same_window)
loadings = self.learning_results.bss_loadings.T
if with_factors:
factors = self.learning_results.bss_factors
else:
factors = None
return self._plot_loadings(
loadings,
comp_ids=comp_ids,
with_factors=with_factors,
factors=factors,
same_window=same_window,
comp_label=title,
cmap=cmap,
no_nans=no_nans,
per_row=per_row,
axes_decor=axes_decor,
)
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def _get_loadings(self, loadings):
if loadings is None:
raise RuntimeError("No learning results found.")
from hyperspy.api import signals
data = loadings.T.reshape((-1,) + self.axes_manager.navigation_shape[::-1])
signal = signals.BaseSignal(
data,
axes=(
[{"size": data.shape[0], "navigate": True}]
+ self.axes_manager._get_navigation_axes_dicts()
),
)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
|
def _get_loadings(self, loadings):
from hyperspy.api import signals
data = loadings.T.reshape((-1,) + self.axes_manager.navigation_shape[::-1])
signal = signals.BaseSignal(
data,
axes=(
[{"size": data.shape[0], "navigate": True}]
+ self.axes_manager._get_navigation_axes_dicts()
),
)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def _get_factors(self, factors):
if factors is None:
raise RuntimeError("No learning results found.")
signal = self.__class__(
factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
axes=[{"size": factors.shape[-1], "navigate": True}]
+ self.axes_manager._get_signal_axes_dicts(),
)
signal.set_signal_type(self.metadata.Signal.signal_type)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
|
def _get_factors(self, factors):
signal = self.__class__(
factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
axes=[{"size": factors.shape[-1], "navigate": True}]
+ self.axes_manager._get_signal_axes_dicts(),
)
signal.set_signal_type(self.metadata.Signal.signal_type)
for axis in signal.axes_manager._axes[1:]:
axis.navigate = False
return signal
|
https://github.com/hyperspy/hyperspy/issues/1672
|
s = hs.load(signal)
s.plot_decomposition_results()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-7c3b0ba4e00e> in <module>()
----> 1 s.plot_decomposition_results()
c:\users\thomasaar\hyperspy\hyperspy\signal.py in plot_decomposition_results(self, factors_navigator, loadings_navigator, factors_dim, loadings_dim)
1472
1473 """
-> 1474 factors = self.get_decomposition_factors()
1475 loadings = self.get_decomposition_loadings()
1476 _plot_x_results(factors=factors, loadings=loadings,
c:\users\thomasaar\hyperspy\hyperspy\signal.py in get_decomposition_factors(self)
1363
1364 """
-> 1365 signal = self._get_factors(self.learning_results.factors)
1366 signal.axes_manager._axes[0].name = "Decomposition component index"
1367 signal.metadata.General.title = ("Decomposition factors of " +
c:\users\thomasaar\hyperspy\hyperspy\signal.py in _get_factors(self, factors)
1333 def _get_factors(self, factors):
1334 signal = self.__class__(
-> 1335 factors.T.reshape((-1,) + self.axes_manager.signal_shape[::-1]),
1336 axes=[{"size": factors.shape[-1], "navigate": True}] +
1337 self.axes_manager._get_signal_axes_dicts())
AttributeError: 'NoneType' object has no attribute 'T'
|
AttributeError
|
def calc_real_time(self):
"""calculate and return real time for whole hypermap
in seconds
"""
line_cnt_sum = np.sum(self.line_counter, dtype=np.float64)
line_avg = self.dsp_metadata["LineAverage"]
pix_avg = self.dsp_metadata["PixelAverage"]
pix_time = self.dsp_metadata["PixelTime"]
width = self.image.width
real_time = line_cnt_sum * line_avg * pix_avg * pix_time * width * 1e-6
return float(real_time)
|
def calc_real_time(self):
"""calculate and return real time for whole hypermap
in seconds
"""
line_cnt_sum = np.sum(self.line_counter)
line_avg = self.dsp_metadata["LineAverage"]
pix_avg = self.dsp_metadata["PixelAverage"]
pix_time = self.dsp_metadata["PixelTime"]
width = self.image.width
real_time = line_cnt_sum * line_avg * pix_avg * pix_time * width / 1000000.0
return float(real_time)
|
https://github.com/hyperspy/hyperspy/issues/2244
|
KeyError Traceback (most recent call last)
<ipython-input-6-ad1ad3391a64> in <module>
----> 1 b=hs.load('20190723_bon_accord_BCF_data/Mapping_(1,1).bcf')
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, convert_units, **kwds)
279 objects = [load_single_file(filename, lazy=lazy,
280 **kwds)
--> 281 for filename in filenames]
282
283 if len(objects) == 1:`
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
<listcomp>(.0)
279 objects = [load_single_file(filename, lazy=lazy,
280 **kwds)
--> 281 for filename in filenames]
282
283 if len(objects) == 1:
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load_single_file(filename, **kwds)
316 else:
317 reader = io_plugins[i]
--> 318 return load_with_reader(filename=filename, reader=reader, **kwds)
319
320
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load_with_reader(filename, reader, signal_type, convert_units, **kwds)
323 lazy = kwds.get('lazy', False)
324 file_data_list = reader.file_reader(filename,
--> 325 **kwds)
326 objects = []
327
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in file_reader(filename, *args, **kwds)
1189 ext = splitext(filename)[1][1:]
1190 if ext == 'bcf':
-> 1191 return bcf_reader(filename, *args, **kwds)
1192 elif ext == 'spx':
1193 return spx_reader(filename, *args, **kwds)
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in bcf_reader(filename, select_type, index,
downsample, cutoff_at_kV, instrument, lazy)
1217
1218 # objectified bcf file:
-> 1219 obj_bcf = BCF_reader(filename, instrument=instrument)
1220 if select_type == 'spectrum':
1221 select_type = 'spectrum_image'
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in __init__(self, filename, instrument)
883 hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str)
884 self.header = HyperHeader(
--> 885 hd_bt_str, self.available_indexes, instrument=instrument)
886 self.hypermap = {}
887
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in __init__(self, xml_str, indexes, instrument)
586 self._set_images(root)
587 self.elements = {}
--> 588 self._set_elements(root)
589 self.line_counter = interpret(root.find('./LineCounter').text)
590 self.channel_count = int(root.find('./ChCount').text)
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in _set_elements(self, root)
729 self.elements[tmp_d['XmlClassName']] = {'line': tmp_d['Line'],
730 'energy': tmp_d['Energy'],
--> 731 'width': tmp_d['Width']}
732 except AttributeError:
733 _logger.info('no element selection present in the spectra..')
KeyError: 'Width'`
|
KeyError
|
def _set_elements(self, root):
"""wrap objectified xml part with selection of elements to
self.elements list
"""
try:
elements = root.find(
"./ClassInstance[@Type='TRTContainerClass']"
"/ChildClassInstances"
"/ClassInstance[@Type='TRTElementInformationList']"
"/ClassInstance[@Type='TRTSpectrumRegionList']"
"/ChildClassInstances"
)
for j in elements.findall("./ClassInstance[@Type='TRTSpectrumRegion']"):
tmp_d = dictionarize(j)
self.elements[tmp_d["XmlClassName"]] = {
"line": tmp_d["Line"],
"energy": tmp_d["Energy"],
}
except AttributeError:
_logger.info("no element selection present in the spectra..")
|
def _set_elements(self, root):
"""wrap objectified xml part with selection of elements to
self.elements list
"""
try:
elements = root.find(
"./ClassInstance[@Type='TRTContainerClass']"
"/ChildClassInstances"
"/ClassInstance[@Type='TRTElementInformationList']"
"/ClassInstance[@Type='TRTSpectrumRegionList']"
"/ChildClassInstances"
)
for j in elements.findall("./ClassInstance[@Type='TRTSpectrumRegion']"):
tmp_d = dictionarize(j)
self.elements[tmp_d["XmlClassName"]] = {
"line": tmp_d["Line"],
"energy": tmp_d["Energy"],
"width": tmp_d["Width"],
}
except AttributeError:
_logger.info("no element selection present in the spectra..")
|
https://github.com/hyperspy/hyperspy/issues/2244
|
KeyError Traceback (most recent call last)
<ipython-input-6-ad1ad3391a64> in <module>
----> 1 b=hs.load('20190723_bon_accord_BCF_data/Mapping_(1,1).bcf')
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, convert_units, **kwds)
279 objects = [load_single_file(filename, lazy=lazy,
280 **kwds)
--> 281 for filename in filenames]
282
283 if len(objects) == 1:`
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
<listcomp>(.0)
279 objects = [load_single_file(filename, lazy=lazy,
280 **kwds)
--> 281 for filename in filenames]
282
283 if len(objects) == 1:
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load_single_file(filename, **kwds)
316 else:
317 reader = io_plugins[i]
--> 318 return load_with_reader(filename=filename, reader=reader, **kwds)
319
320
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-packages\hyperspy\io.py in
load_with_reader(filename, reader, signal_type, convert_units, **kwds)
323 lazy = kwds.get('lazy', False)
324 file_data_list = reader.file_reader(filename,
--> 325 **kwds)
326 objects = []
327
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in file_reader(filename, *args, **kwds)
1189 ext = splitext(filename)[1][1:]
1190 if ext == 'bcf':
-> 1191 return bcf_reader(filename, *args, **kwds)
1192 elif ext == 'spx':
1193 return spx_reader(filename, *args, **kwds)
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in bcf_reader(filename, select_type, index,
downsample, cutoff_at_kV, instrument, lazy)
1217
1218 # objectified bcf file:
-> 1219 obj_bcf = BCF_reader(filename, instrument=instrument)
1220 if select_type == 'spectrum':
1221 select_type = 'spectrum_image'
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in __init__(self, filename, instrument)
883 hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str)
884 self.header = HyperHeader(
--> 885 hd_bt_str, self.available_indexes, instrument=instrument)
886 self.hypermap = {}
887
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in __init__(self, xml_str, indexes, instrument)
586 self._set_images(root)
587 self.elements = {}
--> 588 self._set_elements(root)
589 self.line_counter = interpret(root.find('./LineCounter').text)
590 self.channel_count = int(root.find('./ChCount').text)
~\AppData\Local\conda\conda\envs\hyperspy\lib\site-
packages\hyperspy\io_plugins\bruker.py in _set_elements(self, root)
729 self.elements[tmp_d['XmlClassName']] = {'line': tmp_d['Line'],
730 'energy': tmp_d['Energy'],
--> 731 'width': tmp_d['Width']}
732 except AttributeError:
733 _logger.info('no element selection present in the spectra..')
KeyError: 'Width'`
|
KeyError
|
def __init__(
self, A=1e-5, r=3.0, origin=0.0, shift=20.0, ratio=1.0, module="numexpr", **kwargs
):
super(DoublePowerLaw, self).__init__(
expression="A * (ratio * (x - origin - shift) ** -r + (x - origin) ** -r)",
name="DoublePowerLaw",
A=A,
r=r,
origin=origin,
shift=shift,
ratio=ratio,
position="origin",
autodoc=True,
module=module,
**kwargs,
)
self.origin.free = False
self.shift.value = 20.0
self.shift.free = False
self.left_cutoff = 0.0 # in x-units
# Boundaries
self.A.bmin = 0.0
self.A.bmax = None
self.r.bmin = 1.0
self.r.bmax = 5.0
self.isbackground = True
self.convolved = False
|
def __init__(self, A=1e-5, r=3.0, origin=0.0, shift=20.0, ratio=1.0, **kwargs):
super(DoublePowerLaw, self).__init__(
expression="A * (ratio * (x - origin - shift) ** -r + (x - origin) ** -r)",
name="DoublePowerLaw",
A=A,
r=r,
origin=origin,
shift=shift,
ratio=ratio,
position="origin",
autodoc=True,
**kwargs,
)
self.origin.free = False
self.shift.value = 20.0
self.shift.free = False
self.left_cutoff = 0.0 # in x-units
# Boundaries
self.A.bmin = 0.0
self.A.bmax = None
self.r.bmin = 1.0
self.r.bmax = 5.0
self.isbackground = True
self.convolved = False
|
https://github.com/hyperspy/hyperspy/issues/2129
|
Traceback (most recent call last):
File "ReproduceBug.py", line 17, in <module>
FeSub = FeL.remove_background(signal_range=(670., 695.), background_type='Polynomial', polynomial_order=1)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1099, in remove_background
show_progressbar=show_progressbar)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1040, in _remove_background_cli
background_estimator.function_nd(axis))
ValueError: operands could not be broadcast together with shapes (35,35,2048) (35,2,2048)
|
ValueError
|
def function_nd(self, *args):
"""%s"""
if self._is2D:
x, y = args[0], args[1]
# navigation dimension is 0, f_nd same as f
if not self._is_navigation_multidimensional:
return self.function(x, y)
else:
return self._f(
x[np.newaxis, ...],
y[np.newaxis, ...],
*[
p.map["values"][..., np.newaxis, np.newaxis]
for p in self.parameters
],
)
else:
x = args[0]
if not self._is_navigation_multidimensional:
return self.function(x)
else:
return self._f(
x[np.newaxis, ...],
*[p.map["values"][..., np.newaxis] for p in self.parameters],
)
|
def function_nd(self, *args):
"""%s"""
if self._is2D:
x, y = args[0], args[1]
# navigation dimension is 0, f_nd same as f
if not self._is_navigation_multidimensional():
return self.function(x, y)
else:
return self._f(
x[np.newaxis, ...],
y[np.newaxis, ...],
*[
p.map["values"][..., np.newaxis, np.newaxis]
for p in self.parameters
],
)
else:
x = args[0]
if not self._is_navigation_multidimensional():
return self.function(x)
else:
return self._f(
x[np.newaxis, ...],
*[p.map["values"][..., np.newaxis] for p in self.parameters],
)
|
https://github.com/hyperspy/hyperspy/issues/2129
|
Traceback (most recent call last):
File "ReproduceBug.py", line 17, in <module>
FeSub = FeL.remove_background(signal_range=(670., 695.), background_type='Polynomial', polynomial_order=1)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1099, in remove_background
show_progressbar=show_progressbar)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1040, in _remove_background_cli
background_estimator.function_nd(axis))
ValueError: operands could not be broadcast together with shapes (35,35,2048) (35,2,2048)
|
ValueError
|
def function_nd(self, axis):
"""%s"""
if self._is_navigation_multidimensional:
x = axis[np.newaxis, :]
o = self.offset.map["values"][..., np.newaxis]
else:
x = axis
o = self.offset.value
return self._function(x, o)
|
def function_nd(self, axis):
"""%s"""
x = axis[np.newaxis, :]
o = self.offset.map["values"][..., np.newaxis]
return self._function(x, o)
|
https://github.com/hyperspy/hyperspy/issues/2129
|
Traceback (most recent call last):
File "ReproduceBug.py", line 17, in <module>
FeSub = FeL.remove_background(signal_range=(670., 695.), background_type='Polynomial', polynomial_order=1)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1099, in remove_background
show_progressbar=show_progressbar)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1040, in _remove_background_cli
background_estimator.function_nd(axis))
ValueError: operands could not be broadcast together with shapes (35,35,2048) (35,2,2048)
|
ValueError
|
def _remove_background_cli(
self,
signal_range,
background_estimator,
fast=True,
zero_fill=False,
show_progressbar=None,
):
signal_range = signal_range_from_roi(signal_range)
from hyperspy.models.model1d import Model1D
model = Model1D(self)
model.append(background_estimator)
background_estimator.estimate_parameters(
self, signal_range[0], signal_range[1], only_current=False
)
if fast and not self._lazy:
try:
axis = self.axes_manager.signal_axes[0].axis
result = self - background_estimator.function_nd(axis)
except MemoryError:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar)
model.reset_signal_range()
result = self - model.as_signal(show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[: signal_range[0]] = 0
return result
|
def _remove_background_cli(
self,
signal_range,
background_estimator,
fast=True,
zero_fill=False,
show_progressbar=None,
):
signal_range = signal_range_from_roi(signal_range)
from hyperspy.models.model1d import Model1D
model = Model1D(self)
model.append(background_estimator)
background_estimator.estimate_parameters(
self, signal_range[0], signal_range[1], only_current=False
)
if fast and not self._lazy:
try:
axis = self.axes_manager.signal_axes[0].axis
result = Signal1D(self.data - background_estimator.function_nd(axis))
except MemoryError:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar)
model.reset_signal_range()
result = self - model.as_signal(show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[: signal_range[0]] = 0
return result
|
https://github.com/hyperspy/hyperspy/issues/2129
|
Traceback (most recent call last):
File "ReproduceBug.py", line 17, in <module>
FeSub = FeL.remove_background(signal_range=(670., 695.), background_type='Polynomial', polynomial_order=1)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1099, in remove_background
show_progressbar=show_progressbar)
File "/Users/Zack/anaconda3/envs/conda37/lib/python3.7/site-packages/hyperspy/_signals/signal1d.py", line 1040, in _remove_background_cli
background_estimator.function_nd(axis))
ValueError: operands could not be broadcast together with shapes (35,35,2048) (35,2,2048)
|
ValueError
|
def parse_header(self):
self.dm_version = iou.read_long(self.f, "big")
if self.dm_version not in (3, 4):
raise NotImplementedError(
"Currently we only support reading DM versions 3 and 4 but "
"this file "
"seems to be version %s " % self.dm_version
)
filesizeB = self.read_l_or_q(self.f, "big")
is_little_endian = iou.read_long(self.f, "big")
_logger.info("DM version: %i", self.dm_version)
_logger.info("size %i B", filesizeB)
_logger.info("Is file Little endian? %s", bool(is_little_endian))
if bool(is_little_endian):
self.endian = "little"
else:
self.endian = "big"
|
def parse_header(self):
self.dm_version = iou.read_long(self.f, "big")
if self.dm_version not in (3, 4):
raise NotImplementedError(
"Currently we only support reading DM versions 3 and 4 but "
"this file "
"seems to be version %s " % self.dm_version
)
self.skipif4()
filesizeB = iou.read_long(self.f, "big")
is_little_endian = iou.read_long(self.f, "big")
_logger.info("DM version: %i", self.dm_version)
_logger.info("size %i B", filesizeB)
_logger.info("Is file Little endian? %s", bool(is_little_endian))
if bool(is_little_endian):
self.endian = "little"
else:
self.endian = "big"
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def parse_tags(self, ntags, group_name="root", group_dict={}):
"""Parse the DM file into a dictionary."""
unnammed_data_tags = 0
unnammed_group_tags = 0
for tag in range(ntags):
_logger.debug("Reading tag name at address: %s", self.f.tell())
tag_header = self.parse_tag_header()
tag_name = tag_header["tag_name"]
skip = True if (group_name == "ImageData" and tag_name == "Data") else False
_logger.debug("Tag name: %s", tag_name[:20])
_logger.debug("Tag ID: %s", tag_header["tag_id"])
if tag_header["tag_id"] == 21: # it's a TagType (DATA)
if not tag_name:
tag_name = "Data%i" % unnammed_data_tags
unnammed_data_tags += 1
_logger.debug("Reading data tag at address: %s", self.f.tell())
# Start reading the data
# Raises IOError if it is wrong
self.check_data_tag_delimiter()
infoarray_size = self.read_l_or_q(self.f, "big")
_logger.debug("Infoarray size: %s", infoarray_size)
if infoarray_size == 1: # Simple type
_logger.debug("Reading simple data")
etype = self.read_l_or_q(self.f, "big")
data = self.read_simple_data(etype)
elif infoarray_size == 2: # String
_logger.debug("Reading string")
enctype = self.read_l_or_q(self.f, "big")
if enctype != 18:
raise IOError("Expected 18 (string), got %i" % enctype)
string_length = self.parse_string_definition()
data = self.read_string(string_length, skip=skip)
elif infoarray_size == 3: # Array of simple type
_logger.debug("Reading simple array")
# Read array header
enctype = self.read_l_or_q(self.f, "big")
if enctype != 20: # Should be 20 if it is an array
raise IOError("Expected 20 (string), got %i" % enctype)
size, enc_eltype = self.parse_array_definition()
data = self.read_array(size, enc_eltype, skip=skip)
elif infoarray_size > 3:
enctype = self.read_l_or_q(self.f, "big")
if enctype == 15: # It is a struct
_logger.debug("Reading struct")
definition = self.parse_struct_definition()
_logger.debug("Struct definition %s", definition)
data = self.read_struct(definition, skip=skip)
elif enctype == 20: # It is an array of complex type
# Read complex array info
# The structure is
# 20 <4>, ? <4>, enc_dtype <4>, definition <?>,
# size <4>
enc_eltype = self.read_l_or_q(self.f, "big")
if enc_eltype == 15: # Array of structs
_logger.debug("Reading array of structs")
definition = self.parse_struct_definition()
size = self.read_l_or_q(self.f, "big")
_logger.debug("Struct definition: %s", definition)
_logger.debug("Array size: %s", size)
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"definition": definition},
skip=skip,
)
elif enc_eltype == 18: # Array of strings
_logger.debug("Reading array of strings")
string_length = self.parse_string_definition()
size = self.read_l_or_q(self.f, "big")
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"length": string_length},
skip=skip,
)
elif enc_eltype == 20: # Array of arrays
_logger.debug("Reading array of arrays")
el_length, enc_eltype = self.parse_array_definition()
size = self.read_l_or_q(self.f, "big")
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"size": el_length},
skip=skip,
)
else: # Infoarray_size < 1
raise IOError("Invalided infoarray size ", infoarray_size)
group_dict[tag_name] = data
elif tag_header["tag_id"] == 20: # it's a TagGroup (GROUP)
if not tag_name:
tag_name = "TagGroup%i" % unnammed_group_tags
unnammed_group_tags += 1
_logger.debug("Reading Tag group at address: %s", self.f.tell())
ntags = self.parse_tag_group(size=True)[2]
group_dict[tag_name] = {}
self.parse_tags(
ntags=ntags, group_name=tag_name, group_dict=group_dict[tag_name]
)
else:
_logger.debug("File address:", self.f.tell())
raise DM3TagIDError(tag_header["tag_id"])
|
def parse_tags(self, ntags, group_name="root", group_dict={}):
"""Parse the DM file into a dictionary."""
unnammed_data_tags = 0
unnammed_group_tags = 0
for tag in range(ntags):
_logger.debug("Reading tag name at address: %s", self.f.tell())
tag_header = self.parse_tag_header()
tag_name = tag_header["tag_name"]
skip = True if (group_name == "ImageData" and tag_name == "Data") else False
_logger.debug("Tag name: %s", tag_name[:20])
_logger.debug("Tag ID: %s", tag_header["tag_id"])
if tag_header["tag_id"] == 21: # it's a TagType (DATA)
if not tag_name:
tag_name = "Data%i" % unnammed_data_tags
unnammed_data_tags += 1
_logger.debug("Reading data tag at address: %s", self.f.tell())
# Start reading the data
# Raises IOError if it is wrong
self.check_data_tag_delimiter()
self.skipif4()
infoarray_size = iou.read_long(self.f, "big")
_logger.debug("Infoarray size: %s", infoarray_size)
self.skipif4()
if infoarray_size == 1: # Simple type
_logger.debug("Reading simple data")
etype = iou.read_long(self.f, "big")
data = self.read_simple_data(etype)
elif infoarray_size == 2: # String
_logger.debug("Reading string")
enctype = iou.read_long(self.f, "big")
if enctype != 18:
raise IOError("Expected 18 (string), got %i" % enctype)
string_length = self.parse_string_definition()
data = self.read_string(string_length, skip=skip)
elif infoarray_size == 3: # Array of simple type
_logger.debug("Reading simple array")
# Read array header
enctype = iou.read_long(self.f, "big")
if enctype != 20: # Should be 20 if it is an array
raise IOError("Expected 20 (string), got %i" % enctype)
size, enc_eltype = self.parse_array_definition()
data = self.read_array(size, enc_eltype, skip=skip)
elif infoarray_size > 3:
enctype = iou.read_long(self.f, "big")
if enctype == 15: # It is a struct
_logger.debug("Reading struct")
definition = self.parse_struct_definition()
_logger.debug("Struct definition %s", definition)
data = self.read_struct(definition, skip=skip)
elif enctype == 20: # It is an array of complex type
# Read complex array info
# The structure is
# 20 <4>, ? <4>, enc_dtype <4>, definition <?>,
# size <4>
self.skipif4()
enc_eltype = iou.read_long(self.f, "big")
if enc_eltype == 15: # Array of structs
_logger.debug("Reading array of structs")
definition = self.parse_struct_definition()
self.skipif4() # Padding?
size = iou.read_long(self.f, "big")
_logger.debug("Struct definition: %s", definition)
_logger.debug("Array size: %s", size)
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"definition": definition},
skip=skip,
)
elif enc_eltype == 18: # Array of strings
_logger.debug("Reading array of strings")
string_length = self.parse_string_definition()
size = iou.read_long(self.f, "big")
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"length": string_length},
skip=skip,
)
elif enc_eltype == 20: # Array of arrays
_logger.debug("Reading array of arrays")
el_length, enc_eltype = self.parse_array_definition()
size = iou.read_long(self.f, "big")
data = self.read_array(
size=size,
enc_eltype=enc_eltype,
extra={"size": el_length},
skip=skip,
)
else: # Infoarray_size < 1
raise IOError("Invalided infoarray size ", infoarray_size)
group_dict[tag_name] = data
elif tag_header["tag_id"] == 20: # it's a TagGroup (GROUP)
if not tag_name:
tag_name = "TagGroup%i" % unnammed_group_tags
unnammed_group_tags += 1
_logger.debug("Reading Tag group at address: %s", self.f.tell())
ntags = self.parse_tag_group(skip4=3)[2]
group_dict[tag_name] = {}
self.parse_tags(
ntags=ntags, group_name=tag_name, group_dict=group_dict[tag_name]
)
else:
_logger.debug("File address:", self.f.tell())
raise DM3TagIDError(tag_header["tag_id"])
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def get_data_reader(self, enc_dtype):
# _data_type dictionary.
# The first element of the InfoArray in the TagType
# will always be one of _data_type keys.
# the tuple reads: ('read bytes function', 'number of bytes', 'type')
dtype_dict = {
2: (iou.read_short, 2, "h"),
3: (iou.read_long, 4, "l"),
4: (iou.read_ushort, 2, "H"), # dm3 uses ushorts for unicode chars
5: (iou.read_ulong, 4, "L"),
6: (iou.read_float, 4, "f"),
7: (iou.read_double, 8, "d"),
8: (iou.read_boolean, 1, "B"),
# dm3 uses chars for 1-Byte signed integers
9: (iou.read_char, 1, "b"),
10: (iou.read_byte, 1, "b"), # 0x0a
11: (iou.read_long_long, 8, "q"), # long long, new in DM4
# unsigned long long, new in DM4
12: (iou.read_ulong_long, 8, "Q"),
15: (
self.read_struct,
None,
"struct",
), # 0x0f
18: (self.read_string, None, "c"), # 0x12
20: (self.read_array, None, "array"), # 0x14
}
return dtype_dict[enc_dtype]
|
def get_data_reader(self, enc_dtype):
# _data_type dictionary.
# The first element of the InfoArray in the TagType
# will always be one of _data_type keys.
# the tuple reads: ('read bytes function', 'number of bytes', 'type')
dtype_dict = {
2: (iou.read_short, 2, "h"),
3: (iou.read_long, 4, "l"),
4: (iou.read_ushort, 2, "H"), # dm3 uses ushorts for unicode chars
5: (iou.read_ulong, 4, "L"),
6: (iou.read_float, 4, "f"),
7: (iou.read_double, 8, "d"),
8: (iou.read_boolean, 1, "B"),
# dm3 uses chars for 1-Byte signed integers
9: (iou.read_char, 1, "b"),
10: (iou.read_byte, 1, "b"), # 0x0a
11: (iou.read_double, 8, "l"), # Unknown, new in DM4
12: (iou.read_double, 8, "l"), # Unknown, new in DM4
15: (
self.read_struct,
None,
"struct",
), # 0x0f
18: (self.read_string, None, "c"), # 0x12
20: (self.read_array, None, "array"), # 0x14
}
return dtype_dict[enc_dtype]
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def parse_array_definition(self):
"""Reads and returns the element type and length of the array.
The position in the file must be just after the
array encoded dtype.
"""
enc_eltype = self.read_l_or_q(self.f, "big")
length = self.read_l_or_q(self.f, "big")
return length, enc_eltype
|
def parse_array_definition(self):
"""Reads and returns the element type and length of the array.
The position in the file must be just after the
array encoded dtype.
"""
self.skipif4()
enc_eltype = iou.read_long(self.f, "big")
self.skipif4()
length = iou.read_long(self.f, "big")
return length, enc_eltype
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def parse_string_definition(self):
"""Reads and returns the length of the string.
The position in the file must be just after the
string encoded dtype.
"""
return self.read_l_or_q(self.f, "big")
|
def parse_string_definition(self):
"""Reads and returns the length of the string.
The position in the file must be just after the
string encoded dtype.
"""
self.skipif4()
return iou.read_long(self.f, "big")
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def parse_struct_definition(self):
"""Reads and returns the struct definition tuple.
The position in the file must be just after the
struct encoded dtype.
"""
length = self.read_l_or_q(self.f, "big")
nfields = self.read_l_or_q(self.f, "big")
definition = ()
for ifield in range(nfields):
length2 = self.read_l_or_q(self.f, "big")
definition += (self.read_l_or_q(self.f, "big"),)
return definition
|
def parse_struct_definition(self):
"""Reads and returns the struct definition tuple.
The position in the file must be just after the
struct encoded dtype.
"""
self.f.seek(4, 1) # Skip the name length
self.skipif4(2)
nfields = iou.read_long(self.f, "big")
definition = ()
for ifield in range(nfields):
self.f.seek(4, 1)
self.skipif4(2)
definition += (iou.read_long(self.f, "big"),)
return definition
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def parse_tag_group(self, size=False):
"""Parse the root TagGroup of the given DM3 file f.
Returns the tuple (is_sorted, is_open, n_tags).
endian can be either 'big' or 'little'.
"""
is_sorted = iou.read_byte(self.f, "big")
is_open = iou.read_byte(self.f, "big")
if self.dm_version == 4 and size:
# Just guessing that this is the size
size = self.read_l_or_q(self.f, "big")
n_tags = self.read_l_or_q(self.f, "big")
return bool(is_sorted), bool(is_open), n_tags
|
def parse_tag_group(self, skip4=1):
"""Parse the root TagGroup of the given DM3 file f.
Returns the tuple (is_sorted, is_open, n_tags).
endian can be either 'big' or 'little'.
"""
is_sorted = iou.read_byte(self.f, "big")
is_open = iou.read_byte(self.f, "big")
self.skipif4(n=skip4)
n_tags = iou.read_long(self.f, "big")
return bool(is_sorted), bool(is_open), n_tags
|
https://github.com/hyperspy/hyperspy/issues/2031
|
ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
---------------------------------------------------------------------------
DM3TagIDError Traceback (most recent call last)
<ipython-input-20-bfa902970b32> in <module>()
----> 1 ss = hs.load('D:\\Vadim\\2018-08-02_STEM-d_C_fiber_Fengshan\\bin2\\02_0V.dm4')
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in file_reader(filename, record_by, order, lazy)
986 with open(filename, "rb") as f:
987 dm = DigitalMicrographReader(f)
--> 988 dm.parse_file()
989 images = [ImageObject(imdict, f, order=order, record_by=record_by)
990 for imdict in dm.get_image_dictionaries()]
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_file(self)
85 number_of_root_tags,
86 group_name="root",
---> 87 group_dict=self.tags_dict)
88
89 def parse_header(self):
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
219 ntags=ntags,
220 group_name=tag_name,
--> 221 group_dict=group_dict[tag_name])
222 else:
223 _logger.debug('File address:', self.f.tell())
C:\Users\migunov\AppData\Local\conda\conda\envs\hs\lib\site-packages\hyperspy\io_plugins\digital_micrograph.py in parse_tags(self, ntags, group_name, group_dict)
222 else:
223 _logger.debug('File address:', self.f.tell())
--> 224 raise DM3TagIDError(tag_header['tag_id'])
225
226 def get_data_reader(self, enc_dtype):
DM3TagIDError: 66
|
DM3TagIDError
|
def spd_reader(
filename,
endianess="<",
nav_units=None,
spc_fname=None,
ipr_fname=None,
load_all_spc=False,
**kwargs,
):
"""
Read data from an SPD spectral map specified by filename.
Parameters
----------
filename : str
Name of SPD file to read
endianess : char
Byte-order of data to read
nav_units : 'nm', 'um', or None
Default navigation units for EDAX data is in microns, so this is the
default unit to save in the signal. Can also be specified as 'nm',
which will output a signal with nm scale instead.
spc_fname : None or str
Name of file from which to read the spectral calibration. If data
was exported fully from EDAX TEAM software, an .spc file with the
same name as the .spd should be present.
If `None`, the default filename will be searched for.
Otherwise, the name of the .spc file to use for calibration can
be explicitly given as a string.
ipr_fname : None or str
Name of file from which to read the spatial calibration. If data
was exported fully from EDAX TEAM software, an .ipr file with the
same name as the .spd (plus a \"_Img\" suffix) should be present.
If `None`, the default filename will be searched for.
Otherwise, the name of the .ipr file to use for spatial calibration
can be explicitly given as a string.
load_all_spc : bool
Switch to control if all of the .spc header is read, or just the
important parts for import into HyperSpy
kwargs**
Remaining arguments are passed to the Numpy ``memmap`` function
Returns
-------
list
list with dictionary of signal information to be passed back to
hyperspy.io.load_with_reader
"""
with open(filename, "rb") as f:
spd_header = np.fromfile(f, dtype=get_spd_dtype_list(endianess), count=1)
original_metadata = {"spd_header": sarray2dict(spd_header)}
# dimensions of map data:
nx = original_metadata["spd_header"]["nPoints"]
ny = original_metadata["spd_header"]["nLines"]
nz = original_metadata["spd_header"]["nChannels"]
data_offset = original_metadata["spd_header"]["dataOffset"]
data_type = {"1": "u1", "2": "u2", "4": "u4"}[
str(original_metadata["spd_header"]["countBytes"])
]
lazy = kwargs.pop("lazy", False)
mode = kwargs.pop("mode", "c")
if lazy:
mode = "r"
# Read data from file into a numpy memmap object
data = (
np.memmap(f, mode=mode, offset=data_offset, dtype=data_type, **kwargs)
.squeeze()
.reshape((nz, nx, ny), order="F")
.T
)
# Convert char arrays to strings:
original_metadata["spd_header"]["tag"] = spd_header["tag"][0].view("S16")[0]
# fName is the name of the .bmp (and .ipr) file of the map
original_metadata["spd_header"]["fName"] = spd_header["fName"][0].view("S120")[0]
# Get name of .spc file from the .spd map (if not explicitly given):
if spc_fname is None:
spc_path = os.path.dirname(filename)
spc_basename = os.path.splitext(os.path.basename(filename))[0] + ".spc"
spc_fname = os.path.join(spc_path, spc_basename)
# Get name of .ipr file from bitmap image (if not explicitly given):
if ipr_fname is None:
ipr_basename = (
os.path.splitext(
os.path.basename(original_metadata["spd_header"]["fName"])
)[0].decode()
+ ".ipr"
)
ipr_path = os.path.dirname(filename)
ipr_fname = os.path.join(ipr_path, ipr_basename)
# Flags to control reading of files
read_spc = os.path.isfile(spc_fname)
read_ipr = os.path.isfile(ipr_fname)
# Read the .ipr header (if possible)
if read_ipr:
with open(ipr_fname, "rb") as f:
_logger.debug(" From .spd reader - reading .ipr {}".format(ipr_fname))
ipr_header = __get_ipr_header(f, endianess)
original_metadata["ipr_header"] = sarray2dict(ipr_header)
# Workaround for type error when saving hdf5:
# save as list of strings instead of numpy unicode array
# see https://github.com/hyperspy/hyperspy/pull/2007 and
# https://github.com/h5py/h5py/issues/289 for context
original_metadata["ipr_header"]["charText"] = [
np.string_(i) for i in original_metadata["ipr_header"]["charText"]
]
else:
_logger.warning(
"Could not find .ipr file named {}.\n"
"No spatial calibration will be loaded."
"\n".format(ipr_fname)
)
# Read the .spc header (if possible)
if read_spc:
with open(spc_fname, "rb") as f:
_logger.debug(" From .spd reader - reading .spc {}".format(spc_fname))
spc_header = __get_spc_header(f, endianess, load_all_spc)
spc_dict = sarray2dict(spc_header)
original_metadata["spc_header"] = spc_dict
else:
_logger.warning(
"Could not find .spc file named {}.\n"
"No spectral metadata will be loaded."
"\n".format(spc_fname)
)
# create the energy axis dictionary:
energy_axis = {
"size": data.shape[2],
"index_in_array": 2,
"name": "Energy",
"scale": original_metadata["spc_header"]["evPerChan"] / 1000.0
if read_spc
else 1,
"offset": original_metadata["spc_header"]["startEnergy"] if read_spc else 1,
"units": "keV" if read_spc else t.Undefined,
}
# Handle navigation units input:
scale = 1000 if nav_units == "nm" else 1
if nav_units is not "nm":
if nav_units not in [None, "um"]:
_logger.warning(
'Did not understand nav_units input "{}". '
"Defaulting to microns.\n".format(nav_units)
)
nav_units = r"$\mu m$"
# Create navigation axes dictionaries:
x_axis = {
"size": data.shape[1],
"index_in_array": 1,
"name": "x",
"scale": original_metadata["ipr_header"]["mppX"] * scale if read_ipr else 1,
"offset": 0,
"units": nav_units if read_ipr else t.Undefined,
}
y_axis = {
"size": data.shape[0],
"index_in_array": 0,
"name": "y",
"scale": original_metadata["ipr_header"]["mppY"] * scale if read_ipr else 1,
"offset": 0,
"units": nav_units if read_ipr else t.Undefined,
}
# Assign metadata for spectrum image:
metadata = {
"General": {
"original_filename": os.path.split(filename)[1],
"title": "EDS Spectrum Image",
},
"Signal": {
"signal_type": "EDS_SEM",
"record_by": "spectrum",
},
}
# Add spectral calibration and elements (if present):
if read_spc:
metadata = _add_spc_metadata(metadata, spc_dict)
# Define navigation and signal axes:
axes = [y_axis, x_axis, energy_axis]
dictionary = {
"data": data,
"axes": axes,
"metadata": metadata,
"original_metadata": original_metadata,
}
return [
dictionary,
]
|
def spd_reader(
filename,
endianess="<",
nav_units=None,
spc_fname=None,
ipr_fname=None,
load_all_spc=False,
**kwargs,
):
"""
Read data from an SPD spectral map specified by filename.
Parameters
----------
filename : str
Name of SPD file to read
endianess : char
Byte-order of data to read
nav_units : 'nm', 'um', or None
Default navigation units for EDAX data is in microns, so this is the
default unit to save in the signal. Can also be specified as 'nm',
which will output a signal with nm scale instead.
spc_fname : None or str
Name of file from which to read the spectral calibration. If data
was exported fully from EDAX TEAM software, an .spc file with the
same name as the .spd should be present.
If `None`, the default filename will be searched for.
Otherwise, the name of the .spc file to use for calibration can
be explicitly given as a string.
ipr_fname : None or str
Name of file from which to read the spatial calibration. If data
was exported fully from EDAX TEAM software, an .ipr file with the
same name as the .spd (plus a \"_Img\" suffix) should be present.
If `None`, the default filename will be searched for.
Otherwise, the name of the .ipr file to use for spatial calibration
can be explicitly given as a string.
load_all_spc : bool
Switch to control if all of the .spc header is read, or just the
important parts for import into HyperSpy
kwargs**
Remaining arguments are passed to the Numpy ``memmap`` function
Returns
-------
list
list with dictionary of signal information to be passed back to
hyperspy.io.load_with_reader
"""
with open(filename, "rb") as f:
spd_header = np.fromfile(f, dtype=get_spd_dtype_list(endianess), count=1)
original_metadata = {"spd_header": sarray2dict(spd_header)}
# dimensions of map data:
nx = original_metadata["spd_header"]["nPoints"]
ny = original_metadata["spd_header"]["nLines"]
nz = original_metadata["spd_header"]["nChannels"]
data_offset = original_metadata["spd_header"]["dataOffset"]
data_type = {"1": "u1", "2": "u2", "4": "u4"}[
str(original_metadata["spd_header"]["countBytes"])
]
lazy = kwargs.pop("lazy", False)
mode = kwargs.pop("mode", "c")
if lazy:
mode = "r"
# Read data from file into a numpy memmap object
data = (
np.memmap(f, mode=mode, offset=data_offset, dtype=data_type, **kwargs)
.squeeze()
.reshape((nz, nx, ny), order="F")
.T
)
# Convert char arrays to strings:
original_metadata["spd_header"]["tag"] = spd_header["tag"][0].view("S16")[0]
# fName is the name of the .bmp (and .ipr) file of the map
original_metadata["spd_header"]["fName"] = spd_header["fName"][0].view("S120")[0]
# Get name of .spc file from the .spd map (if not explicitly given):
if spc_fname is None:
spc_path = os.path.dirname(filename)
spc_basename = os.path.splitext(os.path.basename(filename))[0] + ".spc"
spc_fname = os.path.join(spc_path, spc_basename)
# Get name of .ipr file from bitmap image (if not explicitly given):
if ipr_fname is None:
ipr_basename = (
os.path.splitext(
os.path.basename(original_metadata["spd_header"]["fName"])
)[0].decode()
+ ".ipr"
)
ipr_path = os.path.dirname(filename)
ipr_fname = os.path.join(ipr_path, ipr_basename)
# Flags to control reading of files
read_spc = os.path.isfile(spc_fname)
read_ipr = os.path.isfile(ipr_fname)
# Read the .ipr header (if possible)
if read_ipr:
with open(ipr_fname, "rb") as f:
_logger.debug(" From .spd reader - reading .ipr {}".format(ipr_fname))
ipr_header = __get_ipr_header(f, endianess)
original_metadata["ipr_header"] = sarray2dict(ipr_header)
else:
_logger.warning(
"Could not find .ipr file named {}.\n"
"No spatial calibration will be loaded."
"\n".format(ipr_fname)
)
# Read the .spc header (if possible)
if read_spc:
with open(spc_fname, "rb") as f:
_logger.debug(" From .spd reader - reading .spc {}".format(spc_fname))
spc_header = __get_spc_header(f, endianess, load_all_spc)
spc_dict = sarray2dict(spc_header)
original_metadata["spc_header"] = spc_dict
else:
_logger.warning(
"Could not find .spc file named {}.\n"
"No spectral metadata will be loaded."
"\n".format(spc_fname)
)
# create the energy axis dictionary:
energy_axis = {
"size": data.shape[2],
"index_in_array": 2,
"name": "Energy",
"scale": original_metadata["spc_header"]["evPerChan"] / 1000.0
if read_spc
else 1,
"offset": original_metadata["spc_header"]["startEnergy"] if read_spc else 1,
"units": "keV" if read_spc else t.Undefined,
}
# Handle navigation units input:
scale = 1000 if nav_units == "nm" else 1
if nav_units is not "nm":
if nav_units not in [None, "um"]:
_logger.warning(
'Did not understand nav_units input "{}". '
"Defaulting to microns.\n".format(nav_units)
)
nav_units = r"$\mu m$"
# Create navigation axes dictionaries:
x_axis = {
"size": data.shape[1],
"index_in_array": 1,
"name": "x",
"scale": original_metadata["ipr_header"]["mppX"] * scale if read_ipr else 1,
"offset": 0,
"units": nav_units if read_ipr else t.Undefined,
}
y_axis = {
"size": data.shape[0],
"index_in_array": 0,
"name": "y",
"scale": original_metadata["ipr_header"]["mppY"] * scale if read_ipr else 1,
"offset": 0,
"units": nav_units if read_ipr else t.Undefined,
}
# Assign metadata for spectrum image:
metadata = {
"General": {
"original_filename": os.path.split(filename)[1],
"title": "EDS Spectrum Image",
},
"Signal": {
"signal_type": "EDS_SEM",
"record_by": "spectrum",
},
}
# Add spectral calibration and elements (if present):
if read_spc:
metadata = _add_spc_metadata(metadata, spc_dict)
# Define navigation and signal axes:
axes = [y_axis, x_axis, energy_axis]
dictionary = {
"data": data,
"axes": axes,
"metadata": metadata,
"original_metadata": original_metadata,
}
return [
dictionary,
]
|
https://github.com/hyperspy/hyperspy/issues/1916
|
s = hs.load('pyrite.hspy')
crop = s.inav[:,:,::5].isig[:2.6]
crop.compute()
crop.save('pyrite_50frames_2_6keV.hspy')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, **kwds)
548
--> 549 dset = group.require_dataset(key, **these_kwds)
550 got_data = True
C:\anaconda3\envs\hyperspy\lib\site-packages\h5py\_hl\group.py in require_dataset(self, name, shape, dtype, exact, **kwds)
126 if not name in self:
--> 127 return self.create_dataset(name, *(shape, dtype), **kwds)
128
C:\anaconda3\envs\hyperspy\lib\site-packages\h5py\_hl\group.py in create_dataset(self, name, shape, dtype, data, **kwds)
105 with phil:
--> 106 dsid = dataset.make_new_dset(self, shape, dtype, data, **kwds)
107 dset = dataset.Dataset(dsid)
C:\anaconda3\envs\hyperspy\lib\site-packages\h5py\_hl\dataset.py in make_new_dset(parent, shape, dtype, data, chunks, compression, shuffle, fletcher32, maxshape, compression_opts, fillvalue, scaleoffset, track_times)
99 dtype = numpy.dtype(dtype)
--> 100 tid = h5t.py_create(dtype, logical=1)
101
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
h5py\h5t.pyx in h5py.h5t.py_create()
TypeError: No conversion path for dtype: dtype('<U32')
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-12-c5592dab8e1f> in <module>()
----> 1 small_crop.save('pyrite_50frames_2_6keV.hspy')
c:\users\jat\git_repos\hyperspy\hyperspy\signal.py in save(self, filename, overwrite, extension, **kwds)
2133 basename, ext = os.path.splitext(filename)
2134 filename = basename + '.' + extension
-> 2135 io.save(filename, self, overwrite=overwrite, **kwds)
2136
2137 def _replot(self):
c:\users\jat\git_repos\hyperspy\hyperspy\io.py in save(filename, signal, overwrite, **kwds)
480 overwrite = overwrite_method(filename)
481 if overwrite is True:
--> 482 writer.file_writer(filename, signal, **kwds)
483 _logger.info('The %s file was created' % filename)
484 folder, filename = os.path.split(os.path.abspath(filename))
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in file_writer(filename, signal, *args, **kwds)
729 smd.record_by = ""
730 try:
--> 731 write_signal(signal, expg, **kwds)
732 except BaseException:
733 raise
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in write_signal(signal, group, **kwds)
687 original_par = group.create_group(original_metadata)
688 dict2hdfgroup(signal.original_metadata.as_dictionary(), original_par,
--> 689 **kwds)
690 learning_results = group.create_group('learning_results')
691 dict2hdfgroup(signal.learning_results.__dict__,
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
434 if isinstance(value, dict):
435 dict2hdfgroup(value, group.create_group(key),
--> 436 **kwds)
437 elif isinstance(value, DictionaryTreeBrowser):
438 dict2hdfgroup(value.as_dictionary(),
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
434 if isinstance(value, dict):
435 dict2hdfgroup(value, group.create_group(key),
--> 436 **kwds)
437 elif isinstance(value, DictionaryTreeBrowser):
438 dict2hdfgroup(value.as_dictionary(),
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
434 if isinstance(value, dict):
435 dict2hdfgroup(value, group.create_group(key),
--> 436 **kwds)
437 elif isinstance(value, DictionaryTreeBrowser):
438 dict2hdfgroup(value.as_dictionary(),
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
434 if isinstance(value, dict):
435 dict2hdfgroup(value, group.create_group(key),
--> 436 **kwds)
437 elif isinstance(value, DictionaryTreeBrowser):
438 dict2hdfgroup(value.as_dictionary(),
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in dict2hdfgroup(dictionary, group, **kwds)
443 write_signal(value, group.require_group(kn))
444 elif isinstance(value, (np.ndarray, h5py.Dataset, da.Array)):
--> 445 overwrite_dataset(group, value, key, **kwds)
446 elif value is None:
447 group.attrs[key] = '_None_'
c:\users\jat\git_repos\hyperspy\hyperspy\io_plugins\hspy.py in overwrite_dataset(group, data, key, signal_axes, **kwds)
552 # if the shape or dtype/etc do not match,
553 # we delete the old one and create new in the next loop run
--> 554 del group[key]
555 if dset == data:
556 # just a reference to already created thing
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
C:\anaconda3\envs\hyperspy\lib\site-packages\h5py\_hl\group.py in __delitem__(self, name)
301 def __delitem__(self, name):
302 """ Delete (unlink) an item from this group. """
--> 303 self.id.unlink(self._e(name))
304
305 @with_phil
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\h5g.pyx in h5py.h5g.GroupID.unlink()
KeyError: "Couldn't delete link (callback link pointer is NULL (specified link may be '.' or not exist))"
|
TypeError
|
def _update_patch_size(self):
if self.is_on() and self.patch:
ro, ri = self.size
self.patch[0].radius = ro
if ri > 0:
# Add the inner circle
if len(self.patch) == 1:
# Need to remove the previous patch before using
# `_add_patch_to`
self.ax.artists.remove(self.patch[0])
self.patch = []
self._add_patch_to(self.ax)
self.patch[1].radius = ri
self._update_resizers()
self.draw_patch()
|
def _update_patch_size(self):
if self.is_on() and self.patch:
ro, ri = self.size
self.patch[0].radius = ro
if ri > 0:
self.patch[1].radius = ri
self._update_resizers()
self.draw_patch()
|
https://github.com/hyperspy/hyperspy/issues/1954
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-1-4e82cadb3c15> in <module>()
11 annular_roi = hs.roi.CircleROI(r=6,cy=64,cx=64,r_inner=3)
12
---> 13 Roi2D = annular_roi.interactive(color='red',signal=s)
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/roi.py in interactive(self, signal, navigation_signal, out, color, **kwargs)
360 if navigation_signal not in self.signal_map:
361 self.add_widget(navigation_signal, color=color,
--> 362 axes=kwargs.get("axes", None))
363 if (self.update not in
364 signal.axes_manager.events.any_axis_changed.connected):
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/roi.py in add_widget(self, signal, axes, widget, color)
445 widget.set_mpl_ax(ax)
446 with widget.events.changed.suppress_callback(self._on_widget_change):
--> 447 self._apply_roi2widget(widget)
448
449 # Connect widget changes to on_widget_change
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/roi.py in _apply_roi2widget(self, widget)
825 widget.position = (self.cx, self.cy)
826 inner = self.r_inner if self.r_inner != t.Undefined else 0.0
--> 827 widget.size = (self.r, inner)
828
829 def _get_widget_type(self, axes, signal):
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/drawing/widget.py in <lambda>(s, v)
569 self._size_changed()
570
--> 571 size = property(lambda s: s._get_size(), lambda s, v: s._set_size(v))
572
573 def _do_snap_size(self, value=None):
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/drawing/_widgets/circle.py in _set_size(self, value)
69 if np.any(self._size != value):
70 self._size = value
---> 71 self._size_changed()
72
73 def increase_size(self):
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/drawing/widget.py in _size_changed(self)
613 self.events.resized.trigger(self)
614 self.events.changed.trigger(self)
--> 615 self._update_patch_size()
616
617 def get_size_in_indices(self):
~/miniconda3/envs/hyperspy/lib/python3.6/site-packages/hyperspy/drawing/_widgets/circle.py in _update_patch_size(self)
153 self.patch[0].radius = ro
154 if ri > 0:
--> 155 self.patch[1].radius = ri
156 self._update_resizers()
157 self.draw_patch()
IndexError: list index out of range
|
IndexError
|
def __enter__(self):
self.release_version = Release.version
# Get the hash from the git repository if available
self.restore_version = False
if self.release_version.endswith(".dev"):
p = subprocess.Popen(
["git", "describe", "--tags", "--dirty", "--always"],
stdout=subprocess.PIPE,
shell=True,
)
stdout = p.communicate()[0]
if p.returncode != 0:
# Git is not available, we keep the version as is
self.restore_version = False
self.version = self.release_version
else:
gd = stdout[1:].strip().decode()
# Remove the tag
gd = gd[gd.index("-") + 1 :]
self.version = self.release_version + "+git."
self.version += gd.replace("-", ".")
update_version(self.version)
self.restore_version = True
else:
self.version = self.release_version
return self.version
|
def __enter__(self):
self.release_version = Release.version
# Get the hash from the git repository if available
self.restore_version = False
git_master_path = ".git/refs/heads/master"
if self.release_version.endswith(".dev"):
p = subprocess.Popen(
["git", "describe", "--tags", "--dirty", "--always"], stdout=subprocess.PIPE
)
stdout = p.communicate()[0]
if p.returncode != 0:
# Git is not available, we keep the version as is
self.restore_version = False
self.version = self.release_version
else:
gd = stdout[1:].strip().decode()
# Remove the tag
gd = gd[gd.index("-") + 1 :]
self.version = self.release_version + "+git."
self.version += gd.replace("-", ".")
update_version(self.version)
self.restore_version = True
else:
self.version = self.release_version
return self.version
|
https://github.com/hyperspy/hyperspy/issues/1704
|
Obtaining file:///C:/Users/macark/Documents/GitHub/hyperspy
Complete output from command python setup.py egg_info:
C:\Users\macark\Documents\GitHub\hyperspy\setup.py:180: UserWarning: WARNING: C compiler can't be found.
Only slow pure python alternative functions will be available.
To use fast implementation of some functions writen in cython/c either:
a) check that you have compiler (EXACTLY SAME as your python
distribution was compiled with) installed,
b) use binary distribution of hyperspy (i.e. wheels, egg, (only osx and win)).
Installation will continue in 5 sec...
Installation will continue in 5 sec...""")
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\macark\Documents\GitHub\hyperspy\setup.py", line 298, in <module>
with update_version_when_dev() as version:
File "C:\Users\macark\Documents\GitHub\hyperspy\setup.py", line 275, in __enter__
stdout=subprocess.PIPE)
File "c:\program files\anaconda3\lib\subprocess.py", line 947, in __init__
restore_signals, start_new_session)
File "c:\program files\anaconda3\lib\subprocess.py", line 1224, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in C:\Users\macark\Documents\GitHub\hyperspy\
|
FileNotFoundError
|
def select(self):
"""
Cause this widget to be the selected widget in its MPL axes. This
assumes that the widget has its patch added to the MPL axes.
"""
if not self.patch or not self.is_on() or not self.ax:
return
canvas = self.ax.figure.canvas
# Simulate a pick event
x, y = self.patch[0].get_transform().transform_point((0, 0))
mouseevent = MouseEvent("pick_event", canvas, x, y)
# when the widget is added programatically, mouseevent can be "empty"
if mouseevent.button:
canvas.pick_event(mouseevent, self.patch[0])
self.picked = False
|
def select(self):
"""
Cause this widget to be the selected widget in its MPL axes. This
assumes that the widget has its patch added to the MPL axes.
"""
if not self.patch or not self.is_on() or not self.ax:
return
canvas = self.ax.figure.canvas
# Simulate a pick event
x, y = self.patch[0].get_transform().transform_point((0, 0))
mouseevent = MouseEvent("pick_event", canvas, x, y)
canvas.pick_event(mouseevent, self.patch[0])
self.picked = False
|
https://github.com/hyperspy/hyperspy/issues/1693
|
Traceback (most recent call last):
File "<ipython-input-1-83892adf2fe2>", line 12, in <module>
w = roi_nav.add_widget(s)
File "/home/eric/Python_dev/hyperspy/hyperspy/roi.py", line 447, in add_widget
widget.set_mpl_ax(ax)
File "/home/eric/Python_dev/hyperspy/hyperspy/drawing/widget.py", line 168, in set_mpl_ax
self.select()
File "/home/eric/Python_dev/hyperspy/hyperspy/drawing/widget.py", line 182, in select
canvas.pick_event(mouseevent, self.patch[0])
File "/opt/anaconda3/lib/python3.6/site-packages/matplotlib/backend_bases.py", line 1883, in pick_event
self.callbacks.process(s, event)
File "/opt/anaconda3/lib/python3.6/site-packages/matplotlib/cbook.py", line 554, in process
proxy(*args, **kwargs)
File "/opt/anaconda3/lib/python3.6/site-packages/matplotlib/cbook.py", line 416, in __call__
return mtd(*args, **kwargs)
File "/home/eric/Python_dev/hyperspy/hyperspy/drawing/widget.py", line 671, in onpick
super(ResizableDraggableWidgetBase, self).onpick(event)
File "/home/eric/Python_dev/hyperspy/hyperspy/drawing/widget.py", line 438, in onpick
super(DraggableWidgetBase, self).onpick(event)
File "/home/eric/Python_dev/hyperspy/hyperspy/drawing/widget.py", line 958, in onpick
self.pick_offset = (x - self._pos[0], y - self._pos[1])
TypeError: unsupported operand type(s) for -: 'NoneType' and 'float'
|
TypeError
|
def __init__(self, signal1D, yscale=1.0, xscale=1.0, shift=0.0, interpolate=True):
Component.__init__(self, ["yscale", "xscale", "shift"])
self._position = self.shift
self._whitelist["signal1D"] = ("init,sig", signal1D)
self.signal = signal1D
self.yscale.free = True
self.yscale.value = yscale
self.xscale.value = xscale
self.shift.value = shift
self.prepare_interpolator()
# Options
self.isbackground = True
self.convolved = False
self.interpolate = interpolate
|
def __init__(self, signal1D):
Component.__init__(self, ["yscale", "xscale", "shift"])
self._position = self.shift
self._whitelist["signal1D"] = ("init,sig", signal1D)
self.signal = signal1D
self.yscale.free = True
self.yscale.value = 1.0
self.xscale.value = 1.0
self.shift.value = 0.0
self.prepare_interpolator()
# Options
self.isbackground = True
self.convolved = False
self.interpolate = True
|
https://github.com/hyperspy/hyperspy/issues/1862
|
s = hs.datasets.example_signals.EDS_SEM_Spectrum()
hs.model.components1D.ScalableFixedPattern(s,xscale=2)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-97-f7d2822487b5> in <module>()
----> 1 c = hs.model.components1D.ScalableFixedPattern(s,xscale=2)
TypeError: __init__() got an unexpected keyword argument 'xscale'
|
TypeError
|
def _setup_vfs(self):
"""Setup the virtual file system tree represented as python dictionary
with values populated with SFSTreeItem instances
See also:
SFSTreeItem
"""
with open(self.filename, "rb") as fn:
# check if file tree do not exceed one chunk:
n_file_tree_chunks = ceil((self.n_tree_items * 0x200) / (self.chunksize - 0x20))
if n_file_tree_chunks == 1:
# file tree do not exceed one chunk in bcf:
fn.seek(self.chunksize * self.tree_address + 0x138)
raw_tree = fn.read(0x200 * self.n_tree_items)
else:
temp_str = io.BytesIO()
tree_address = self.tree_address
tree_items_in_chunk = (self.chunksize - 0x20) // 0x200
for i in range(n_file_tree_chunks):
# jump to tree/list address:
fn.seek(self.chunksize * tree_address + 0x118)
# next tree/list address:
tree_address = strct_unp("<I", fn.read(4))[0]
fn.seek(28, 1)
temp_str.write(fn.read(tree_items_in_chunk * 0x200))
temp_str.seek(0)
raw_tree = temp_str.read(self.n_tree_items * 0x200)
temp_str.close()
temp_item_list = [
SFSTreeItem(raw_tree[i * 0x200 : (i + 1) * 0x200], self)
for i in range(self.n_tree_items)
]
# temp list with parents of items
paths = [[h.parent] for h in temp_item_list]
# checking the compression header which can be different per file:
self._check_the_compresion(temp_item_list)
if self.compression == "zlib":
for c in temp_item_list:
if not c.is_dir:
c.setup_compression_metadata()
# convert the items to virtual file system tree
dict_tree = self._flat_items_to_dict(paths, temp_item_list)
# and finaly set the Virtual file system:
self.vfs = dict_tree["root"]
|
def _setup_vfs(self):
"""Setup the virtual file system tree represented as python dictionary
with values populated with SFSTreeItem instances
See also:
SFSTreeItem
"""
with open(self.filename, "rb") as fn:
# file tree do not exceed one chunk in bcf:
fn.seek(self.chunksize * self.tree_address + 0x138)
raw_tree = fn.read(0x200 * self.n_tree_items)
temp_item_list = [
SFSTreeItem(raw_tree[i * 0x200 : (i + 1) * 0x200], self)
for i in range(self.n_tree_items)
]
# temp list with parents of items
paths = [[h.parent] for h in temp_item_list]
# checking the compression header which can be different per file:
self._check_the_compresion(temp_item_list)
if self.compression == "zlib":
for c in temp_item_list:
if not c.is_dir:
c.setup_compression_metadata()
# convert the items to virtual file system tree
dict_tree = self._flat_items_to_dict(paths, temp_item_list)
# and finaly set the Virtual file system:
self.vfs = dict_tree["root"]
|
https://github.com/hyperspy/hyperspy/issues/1801
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-e233613e1d5d> in <module>()
----> 1 bk=hs.load('eds.bcf')
~/Documents/hyperspy/hyperspy/io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
~/Documents/hyperspy/hyperspy/io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in file_reader(filename, select_type, index, downsample, cutoff_at_kV, instrument, lazy)
1213
1214 # objectified bcf file:
-> 1215 obj_bcf = BCF_reader(filename, instrument=instrument)
1216 if select_type == 'image':
1217 return bcf_imagery(obj_bcf)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, filename, instrument)
880 header_byte_str = header_file.get_as_BytesIO_string().getvalue()
881 hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str)
--> 882 self.header = HyperHeader(header_byte_str, self.available_indexes, instrument=instrument)
883 self.hypermap = {}
884
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, xml_str, indexes, instrument)
569 self._set_microscope(root)
570 self._get_mode(instrument)
--> 571 self._set_images(root)
572 self.elements = {}
573 self._set_elements(root)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _set_images(self, root)
692 if not(n.get('Name')):
693 image_node = n
--> 694 self.image = self._parse_image(image_node)
695 if self.version == 2:
696 overview_node = root.findall(
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _parse_image(self, xml_node, overview)
671 if any(array1):
672 item = self.gen_hspy_item_dict_basic()
--> 673 data = array1.reshape((image.height, image.width))
674 detector_name = str(img.find('./Description').text)
675 item['data'] = data
ValueError: cannot reshape array of size 114688 into shape (448,512)
|
ValueError
|
def _parse_image(self, xml_node, overview=False):
"""parse image from bruker xml image node."""
if overview:
rect_node = xml_node.find(
"./ChildClassInstances"
"/ClassInstance["
# "@Type='TRTRectangleOverlayElement' and "
"@Name='Map']/TRTSolidOverlayElement/"
"TRTBasicLineOverlayElement/TRTOverlayElement"
)
if rect_node is not None:
over_rect = dictionarize(rect_node)["TRTOverlayElement"]["Rect"]
rect = {
"y1": over_rect["Top"] * self.y_res,
"x1": over_rect["Left"] * self.x_res,
"y2": over_rect["Bottom"] * self.y_res,
"x2": over_rect["Right"] * self.x_res,
}
over_dict = {
"marker_type": "Rectangle",
"plot_on_signal": True,
"data": rect,
"marker_properties": {"color": "yellow", "linewidth": 2},
}
image = Container()
image.width = int(xml_node.find("./Width").text) # in pixels
image.height = int(xml_node.find("./Height").text) # in pixels
image.plane_count = int(xml_node.find("./PlaneCount").text)
image.images = []
for i in range(image.plane_count):
img = xml_node.find("./Plane" + str(i))
raw = codecs.decode((img.find("./Data").text).encode("ascii"), "base64")
array1 = np.frombuffer(raw, dtype=np.uint16)
if any(array1):
item = self.gen_hspy_item_dict_basic()
data = array1.reshape((image.height, image.width))
detector_name = str(img.find("./Description").text)
item["data"] = data
item["axes"][0]["size"] = image.height
item["axes"][1]["size"] = image.width
item["metadata"]["General"] = {"title": detector_name}
item["metadata"]["Signal"] = {
"signal_type": detector_name,
"record_by": "image",
}
if overview and (rect_node is not None):
item["metadata"]["Markers"] = {"overview": over_dict}
image.images.append(item)
return image
|
def _parse_image(self, xml_node, overview=False):
"""parse image from bruker xml image node."""
if overview:
rect_node = xml_node.find(
"./ChildClassInstances"
"/ClassInstance["
# "@Type='TRTRectangleOverlayElement' and "
"@Name='Map']/TRTSolidOverlayElement/"
"TRTBasicLineOverlayElement/TRTOverlayElement"
)
over_rect = dictionarize(rect_node)["TRTOverlayElement"]["Rect"]
rect = {
"y1": over_rect["Top"] * self.y_res,
"x1": over_rect["Left"] * self.x_res,
"y2": over_rect["Bottom"] * self.y_res,
"x2": over_rect["Right"] * self.x_res,
}
over_dict = {
"marker_type": "Rectangle",
"plot_on_signal": True,
"data": rect,
"marker_properties": {"color": "yellow", "linewidth": 2},
}
image = Container()
image.width = int(xml_node.find("./Width").text) # in pixels
image.height = int(xml_node.find("./Height").text) # in pixels
image.plane_count = int(xml_node.find("./PlaneCount").text)
image.images = []
for i in range(image.plane_count):
img = xml_node.find("./Plane" + str(i))
raw = codecs.decode((img.find("./Data").text).encode("ascii"), "base64")
array1 = np.frombuffer(raw, dtype=np.uint16)
if any(array1):
item = self.gen_hspy_item_dict_basic()
data = array1.reshape((image.height, image.width))
detector_name = str(img.find("./Description").text)
item["data"] = data
item["axes"][0]["size"] = image.height
item["axes"][1]["size"] = image.width
item["metadata"]["General"] = {"title": detector_name}
item["metadata"]["Signal"] = {
"signal_type": detector_name,
"record_by": "image",
}
if overview:
item["metadata"]["Markers"] = {"overview": over_dict}
image.images.append(item)
return image
|
https://github.com/hyperspy/hyperspy/issues/1801
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-e233613e1d5d> in <module>()
----> 1 bk=hs.load('eds.bcf')
~/Documents/hyperspy/hyperspy/io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
~/Documents/hyperspy/hyperspy/io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in file_reader(filename, select_type, index, downsample, cutoff_at_kV, instrument, lazy)
1213
1214 # objectified bcf file:
-> 1215 obj_bcf = BCF_reader(filename, instrument=instrument)
1216 if select_type == 'image':
1217 return bcf_imagery(obj_bcf)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, filename, instrument)
880 header_byte_str = header_file.get_as_BytesIO_string().getvalue()
881 hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str)
--> 882 self.header = HyperHeader(header_byte_str, self.available_indexes, instrument=instrument)
883 self.hypermap = {}
884
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, xml_str, indexes, instrument)
569 self._set_microscope(root)
570 self._get_mode(instrument)
--> 571 self._set_images(root)
572 self.elements = {}
573 self._set_elements(root)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _set_images(self, root)
692 if not(n.get('Name')):
693 image_node = n
--> 694 self.image = self._parse_image(image_node)
695 if self.version == 2:
696 overview_node = root.findall(
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _parse_image(self, xml_node, overview)
671 if any(array1):
672 item = self.gen_hspy_item_dict_basic()
--> 673 data = array1.reshape((image.height, image.width))
674 detector_name = str(img.find('./Description').text)
675 item['data'] = data
ValueError: cannot reshape array of size 114688 into shape (448,512)
|
ValueError
|
def _parse_image(self, xml_node, overview=False):
"""parse image from bruker xml image node."""
if overview:
rect_node = xml_node.find(
"./ChildClassInstances"
"/ClassInstance["
# "@Type='TRTRectangleOverlayElement' and "
"@Name='Map']/TRTSolidOverlayElement/"
"TRTBasicLineOverlayElement/TRTOverlayElement"
)
if rect_node is not None:
over_rect = dictionarize(rect_node)["TRTOverlayElement"]["Rect"]
rect = {
"y1": over_rect["Top"] * self.y_res,
"x1": over_rect["Left"] * self.x_res,
"y2": over_rect["Bottom"] * self.y_res,
"x2": over_rect["Right"] * self.x_res,
}
over_dict = {
"marker_type": "Rectangle",
"plot_on_signal": True,
"data": rect,
"marker_properties": {"color": "yellow", "linewidth": 2},
}
image = Container()
image.width = int(xml_node.find("./Width").text) # in pixels
image.height = int(xml_node.find("./Height").text) # in pixels
image.dtype = "u" + xml_node.find("./ItemSize").text # in bytes ('u1','u2','u4')
image.plane_count = int(xml_node.find("./PlaneCount").text)
image.images = []
for i in range(image.plane_count):
img = xml_node.find("./Plane" + str(i))
raw = codecs.decode((img.find("./Data").text).encode("ascii"), "base64")
array1 = np.frombuffer(raw, dtype=image.dtype)
if any(array1):
item = self.gen_hspy_item_dict_basic()
data = array1.reshape((image.height, image.width))
desc = img.find("./Description")
item["data"] = data
item["axes"][0]["size"] = image.height
item["axes"][1]["size"] = image.width
item["metadata"]["Signal"] = {"record_by": "image"}
item["metadata"]["General"] = {}
if desc is not None:
item["metadata"]["General"]["title"] = str(desc.text)
if overview and (rect_node is not None):
item["metadata"]["Markers"] = {"overview": over_dict}
image.images.append(item)
return image
|
def _parse_image(self, xml_node, overview=False):
"""parse image from bruker xml image node."""
if overview:
rect_node = xml_node.find(
"./ChildClassInstances"
"/ClassInstance["
# "@Type='TRTRectangleOverlayElement' and "
"@Name='Map']/TRTSolidOverlayElement/"
"TRTBasicLineOverlayElement/TRTOverlayElement"
)
if rect_node is not None:
over_rect = dictionarize(rect_node)["TRTOverlayElement"]["Rect"]
rect = {
"y1": over_rect["Top"] * self.y_res,
"x1": over_rect["Left"] * self.x_res,
"y2": over_rect["Bottom"] * self.y_res,
"x2": over_rect["Right"] * self.x_res,
}
over_dict = {
"marker_type": "Rectangle",
"plot_on_signal": True,
"data": rect,
"marker_properties": {"color": "yellow", "linewidth": 2},
}
image = Container()
image.width = int(xml_node.find("./Width").text) # in pixels
image.height = int(xml_node.find("./Height").text) # in pixels
image.plane_count = int(xml_node.find("./PlaneCount").text)
image.images = []
for i in range(image.plane_count):
img = xml_node.find("./Plane" + str(i))
raw = codecs.decode((img.find("./Data").text).encode("ascii"), "base64")
array1 = np.frombuffer(raw, dtype=np.uint16)
if any(array1):
item = self.gen_hspy_item_dict_basic()
data = array1.reshape((image.height, image.width))
detector_name = str(img.find("./Description").text)
item["data"] = data
item["axes"][0]["size"] = image.height
item["axes"][1]["size"] = image.width
item["metadata"]["General"] = {"title": detector_name}
item["metadata"]["Signal"] = {
"signal_type": detector_name,
"record_by": "image",
}
if overview and (rect_node is not None):
item["metadata"]["Markers"] = {"overview": over_dict}
image.images.append(item)
return image
|
https://github.com/hyperspy/hyperspy/issues/1801
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-e233613e1d5d> in <module>()
----> 1 bk=hs.load('eds.bcf')
~/Documents/hyperspy/hyperspy/io.py in load(filenames, signal_type, stack, stack_axis, new_axis_name, lazy, **kwds)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in <listcomp>(.0)
241 objects = [load_single_file(filename, lazy=lazy,
242 **kwds)
--> 243 for filename in filenames]
244
245 if len(objects) == 1:
~/Documents/hyperspy/hyperspy/io.py in load_single_file(filename, signal_type, **kwds)
284 reader=reader,
285 signal_type=signal_type,
--> 286 **kwds)
287
288
~/Documents/hyperspy/hyperspy/io.py in load_with_reader(filename, reader, signal_type, **kwds)
293 lazy = kwds.get('lazy', False)
294 file_data_list = reader.file_reader(filename,
--> 295 **kwds)
296 objects = []
297
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in file_reader(filename, select_type, index, downsample, cutoff_at_kV, instrument, lazy)
1213
1214 # objectified bcf file:
-> 1215 obj_bcf = BCF_reader(filename, instrument=instrument)
1216 if select_type == 'image':
1217 return bcf_imagery(obj_bcf)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, filename, instrument)
880 header_byte_str = header_file.get_as_BytesIO_string().getvalue()
881 hd_bt_str = fix_dec_patterns.sub(b'\\1.\\2', header_byte_str)
--> 882 self.header = HyperHeader(header_byte_str, self.available_indexes, instrument=instrument)
883 self.hypermap = {}
884
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in __init__(self, xml_str, indexes, instrument)
569 self._set_microscope(root)
570 self._get_mode(instrument)
--> 571 self._set_images(root)
572 self.elements = {}
573 self._set_elements(root)
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _set_images(self, root)
692 if not(n.get('Name')):
693 image_node = n
--> 694 self.image = self._parse_image(image_node)
695 if self.version == 2:
696 overview_node = root.findall(
~/Documents/hyperspy/hyperspy/io_plugins/bcf.py in _parse_image(self, xml_node, overview)
671 if any(array1):
672 item = self.gen_hspy_item_dict_basic()
--> 673 data = array1.reshape((image.height, image.width))
674 detector_name = str(img.find('./Description').text)
675 item['data'] = data
ValueError: cannot reshape array of size 114688 into shape (448,512)
|
ValueError
|
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
signal_range=None,
show_progressbar=None,
**kwargs,
):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically correct if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the aligment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal1D of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
signal_range : tuple of integers, tuple of floats. Optional
Will only search for the ZLP within the signal_range. If given
in integers, the range will be in index values. If given floats,
the range will be in spectrum values. Useful if there are features
in the spectrum which are more intense than the ZLP.
Default is searching in the whole signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Examples
--------
>>> s_ll = hs.signals.EELSSpectrum(np.zeros(1000))
>>> s_ll.data[100] = 100
>>> s_ll.align_zero_loss_peak()
Aligning both the lowloss signal and another signal
>>> s = hs.signals.EELSSpectrum(np.range(1000))
>>> s_ll.align_zero_loss_peak(also_align=[s])
Aligning within a narrow range of the lowloss signal
>>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
"""
signal_range = signal_range_from_roi(signal_range)
def substract_from_offset(value, signals):
if isinstance(value, da.Array):
value = value.compute()
for signal in signals:
signal.axes_manager[-1].offset -= value
def estimate_zero_loss_peak_centre(s, mask, signal_range):
if signal_range:
zlpc = s.isig[
signal_range[0] : signal_range[1]
].estimate_zero_loss_peak_centre(mask=mask)
else:
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
return zlpc
zlpc = estimate_zero_loss_peak_centre(self, mask=mask, signal_range=signal_range)
mean_ = np.nanmean(zlpc.data)
if print_stats is True:
print()
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
shift_array = -zlpc.data + mean_
if zlpc._lazy:
shift_array = shift_array.compute()
signal.shift1D(shift_array, show_progressbar=show_progressbar)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(
self, mask=mask, signal_range=signal_range
)
substract_from_offset(np.nanmean(zlpc.data), also_align + [self])
if subpixel is False:
return
left, right = -3.0, 3.0
if calibrate is False:
mean_ = np.nanmean(
estimate_zero_loss_peak_centre(self, mask, signal_range).data
)
left += mean_
right += mean_
left = (
left if left > self.axes_manager[-1].axis[0] else self.axes_manager[-1].axis[0]
)
right = (
right
if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1]
)
if self.axes_manager.navigation_size > 1:
self.align1D(
left,
right,
also_align=also_align,
show_progressbar=show_progressbar,
mask=mask,
**kwargs,
)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(
self, mask=mask, signal_range=signal_range
)
substract_from_offset(np.nanmean(zlpc.data), also_align + [self])
|
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
signal_range=None,
show_progressbar=None,
**kwargs,
):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically correct if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the aligment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal1D of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
signal_range : tuple of integers, tuple of floats. Optional
Will only search for the ZLP within the signal_range. If given
in integers, the range will be in index values. If given floats,
the range will be in spectrum values. Useful if there are features
in the spectrum which are more intense than the ZLP.
Default is searching in the whole signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Examples
--------
>>> s_ll = hs.signals.EELSSpectrum(np.zeros(1000))
>>> s_ll.data[100] = 100
>>> s_ll.align_zero_loss_peak()
Aligning both the lowloss signal and another signal
>>> s = hs.signals.EELSSpectrum(np.range(1000))
>>> s_ll.align_zero_loss_peak(also_align=[s])
Aligning within a narrow range of the lowloss signal
>>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
"""
signal_range = signal_range_from_roi(signal_range)
def substract_from_offset(value, signals):
if isinstance(value, da.Array):
value = value.compute()
for signal in signals:
signal.axes_manager[-1].offset -= value
def estimate_zero_loss_peak_centre(s, mask, signal_range):
if signal_range:
zlpc = s.isig[
signal_range[0] : signal_range[1]
].estimate_zero_loss_peak_centre(mask=mask)
else:
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
return zlpc
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
mean_ = without_nans(zlpc.data).mean()
if print_stats is True:
print()
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
signal.shift1D(-zlpc.data + mean_, show_progressbar=show_progressbar)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
if subpixel is False:
return
left, right = -3.0, 3.0
if calibrate is False:
mean_ = without_nans(
estimate_zero_loss_peak_centre(self, mask, signal_range).data
).mean()
left += mean_
right += mean_
left = (
left if left > self.axes_manager[-1].axis[0] else self.axes_manager[-1].axis[0]
)
right = (
right
if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1]
)
if self.axes_manager.navigation_size > 1:
self.align1D(
left,
right,
also_align=also_align,
show_progressbar=show_progressbar,
**kwargs,
)
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
if calibrate is True:
substract_from_offset(without_nans(zlpc.data).mean(), also_align + [self])
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def kramers_kronig_analysis(
self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False
):
"""Calculate the complex
dielectric function from a single scattering distribution (SSD) using
the Kramers-Kronig relations.
It uses the FFT method as in [Egerton2011]_. The SSD is an
EELSSpectrum instance containing SSD low-loss EELS with no zero-loss
peak. The internal loop is devised to approximately subtract the
surface plasmon contribution supposing an unoxidized planar surface and
neglecting coupling between the surfaces. This method does not account
for retardation effects, instrumental broading and surface plasmon
excitation in particles.
Note that either refractive index or thickness are required.
If both are None or if both are provided an exception is raised.
Parameters
----------
zlp: {None, number, Signal1D}
ZLP intensity. It is optional (can be None) if `t` is None and `n`
is not None and the thickness estimation is not required. If `t`
is not None, the ZLP is required to perform the normalization and
if `t` is not None, the ZLP is required to calculate the thickness.
If the ZLP is the same for all spectra, the integral of the ZLP
can be provided as a number. Otherwise, if the ZLP intensity is not
the same for all spectra, it can be provided as i) a Signal1D
of the same dimensions as the current signal containing the ZLP
spectra for each location ii) a BaseSignal of signal dimension 0
and navigation_dimension equal to the current signal containing the
integrated ZLP intensity.
iterations: int
Number of the iterations for the internal loop to remove the
surface plasmon contribution. If 1 the surface plasmon contribution
is not estimated and subtracted (the default is 1).
n: {None, float}
The medium refractive index. Used for normalization of the
SSD to obtain the energy loss function. If given the thickness
is estimated and returned. It is only required when `t` is None.
t: {None, number, Signal1D}
The sample thickness in nm. Used for normalization of the
SSD to obtain the energy loss function. It is only required when
`n` is None. If the thickness is the same for all spectra it can be
given by a number. Otherwise, it can be provided as a BaseSignal
with signal dimension 0 and navigation_dimension equal to the
current signal.
delta : float
A small number (0.1-0.5 eV) added to the energy axis in
specific steps of the calculation the surface loss correction to
improve stability.
full_output : bool
If True, return a dictionary that contains the estimated
thickness if `t` is None and the estimated surface plasmon
excitation and the spectrum corrected from surface plasmon
excitations if `iterations` > 1.
Returns
-------
eps: DielectricFunction instance
The complex dielectric function results,
$\epsilon = \epsilon_1 + i*\epsilon_2$,
contained in an DielectricFunction instance.
output: Dictionary (optional)
A dictionary of optional outputs with the following keys:
``thickness``
The estimated thickness in nm calculated by normalization of
the SSD (only when `t` is None)
``surface plasmon estimation``
The estimated surface plasmon excitation (only if
`iterations` > 1.)
Raises
------
ValuerError
If both `n` and `t` are undefined (None).
AttribureError
If the beam_energy or the collection semi-angle are not defined in
metadata.
Notes
-----
This method is based in Egerton's Matlab code [Egerton2011]_ with some
minor differences:
* The integrals are performed using the simpsom rule instead of using
a summation.
* The wrap-around problem when computing the ffts is workarounded by
padding the signal instead of substracting the reflected tail.
.. [Egerton2011] Ray Egerton, "Electron Energy-Loss
Spectroscopy in the Electron Microscope", Springer-Verlag, 2011.
"""
output = {}
if iterations == 1:
# In this case s.data is not modified so there is no need to make
# a deep copy.
s = self.isig[0.0:]
else:
s = self.isig[0.0:].deepcopy()
sorig = self.isig[0.0:]
# Avoid singularity at 0
if s.axes_manager.signal_axes[0].axis[0] == 0:
s = s.isig[1:]
sorig = self.isig[1:]
# Constants and units
me = constants.value("electron mass energy equivalent in MeV") * 1e3 # keV
# Mapped parameters
try:
e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
except BaseException:
raise AttributeError(
"Please define the beam energy."
"You can do this e.g. by using the "
"set_microscope_parameters method"
)
try:
beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle
except BaseException:
raise AttributeError(
"Please define the collection semi-angle. "
"You can do this e.g. by using the "
"set_microscope_parameters method"
)
axis = s.axes_manager.signal_axes[0]
eaxis = axis.axis.copy()
if isinstance(zlp, hyperspy.signal.BaseSignal):
if (
zlp.axes_manager.navigation_dimension
== self.axes_manager.navigation_dimension
):
if zlp.axes_manager.signal_dimension == 0:
i0 = zlp.data
else:
i0 = zlp.integrate1D(axis.index_in_axes_manager).data
else:
raise ValueError(
"The ZLP signal dimensions are not "
"compatible with the dimensions of the "
"low-loss signal"
)
# The following prevents errors if the signal is a single spectrum
if len(i0) != 1:
i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))
elif isinstance(zlp, numbers.Number):
i0 = zlp
else:
raise ValueError(
"The zero-loss peak input is not valid, it must be\
in the BaseSignal class or a Number."
)
if isinstance(t, hyperspy.signal.BaseSignal):
if (
t.axes_manager.navigation_dimension
== self.axes_manager.navigation_dimension
) and (t.axes_manager.signal_dimension == 0):
t = t.data
t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))
else:
raise ValueError(
"The thickness signal dimensions are not "
"compatible with the dimensions of the "
"low-loss signal"
)
elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):
raise ValueError(
"thickness must be a HyperSpy signal or a number, not a numpy array."
)
# Slicer to get the signal data from 0 to axis.size
slicer = s.axes_manager._get_data_slice(
[
(axis.index_in_array, slice(None, axis.size)),
]
)
# Kinetic definitions
ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2
tgt = e0 * (2 * me + e0) / (me + e0)
rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)
for io in range(iterations):
# Calculation of the ELF by normalization of the SSD
# Norm(SSD) = Imag(-1/epsilon) (Energy Loss Funtion, ELF)
# We start by the "angular corrections"
Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale
if n is None and t is None:
raise ValueError(
"The thickness and the refractive index are "
"not defined. Please provide one of them."
)
elif n is not None and t is not None:
raise ValueError(
"Please provide the refractive index OR the "
"thickness information, not both"
)
elif n is not None:
# normalize using the refractive index.
K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
K = (K / (np.pi / 2) / (1 - 1.0 / n**2)).reshape(
np.insert(K.shape, axis.index_in_array, 1)
)
# Calculate the thickness only if possible and required
if zlp is not None and (full_output is True or iterations > 1):
te = 332.5 * K * ke / i0
if full_output is True:
output["thickness"] = te
elif t is not None:
if zlp is None:
raise ValueError(
"The ZLP must be provided when the "
"thickness is used for normalization."
)
# normalize using the thickness
K = t * i0 / (332.5 * ke)
te = t
Im = Im / K
# Kramers Kronig Transform:
# We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT
# Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490
# Use a size that is a power of two to speed up the fft and
# make it double the closest upper value to workaround the
# wrap-around problem.
esize = 2 * closest_power_of_two(axis.size)
q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize
q[slicer] *= -1
q = np.fft.fft(q, axis=axis.index_in_array)
# Final touch, we have Re(1/eps)
Re = q[slicer].real + 1
# Egerton does this to correct the wrap-around problem, but in our
# case this is not necessary because we compute the fft on an
# extended and padded spectrum to avoid this problem.
# Re=real(q)
# Tail correction
# vm=Re[axis.size-1]
# Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /
# (axis.size*2-arange(0,axis.size-1)))**2)
# Re[axis.size:]=1+(0.5*vm*((axis.size-1) /
# (axis.size+arange(0,axis.size)))**2)
# Epsilon appears:
# We calculate the real and imaginary parts of the CDF
e1 = Re / (Re**2 + Im**2)
e2 = Im / (Re**2 + Im**2)
if iterations > 1 and zlp is not None:
# Surface losses correction:
# Calculates the surface ELF from a vaccumm border effect
# A simulated surface plasmon is subtracted from the ELF
Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im
adep = tgt / (eaxis + delta) * np.arctan(
beta * tgt / axis.axis
) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2)
Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale
s.data = sorig.data - Srfint
_logger.debug("Iteration number: %d / %d", io + 1, iterations)
if iterations == io + 1 and full_output is True:
sp = sorig._deepcopy_with_new_data(Srfint)
sp.metadata.General.title += " estimated surface plasmon excitation."
output["surface plasmon estimation"] = sp
del sp
del Srfint
eps = s._deepcopy_with_new_data(e1 + e2 * 1j)
del s
eps.set_signal_type("DielectricFunction")
eps.metadata.General.title = (
self.metadata.General.title + "dielectric function "
"(from Kramers-Kronig analysis)"
)
if eps.tmp_parameters.has_item("filename"):
eps.tmp_parameters.filename = (
self.tmp_parameters.filename + "_CDF_after_Kramers_Kronig_transform"
)
if "thickness" in output:
thickness = eps._get_navigation_signal(
data=te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])]
)
thickness.metadata.General.title = (
self.metadata.General.title + " thickness "
"(calculated using Kramers-Kronig analysis)"
)
output["thickness"] = thickness
if full_output is False:
return eps
else:
return eps, output
|
def kramers_kronig_analysis(
self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False
):
"""Calculate the complex
dielectric function from a single scattering distribution (SSD) using
the Kramers-Kronig relations.
It uses the FFT method as in [Egerton2011]_. The SSD is an
EELSSpectrum instance containing SSD low-loss EELS with no zero-loss
peak. The internal loop is devised to approximately subtract the
surface plasmon contribution supposing an unoxidized planar surface and
neglecting coupling between the surfaces. This method does not account
for retardation effects, instrumental broading and surface plasmon
excitation in particles.
Note that either refractive index or thickness are required.
If both are None or if both are provided an exception is raised.
Parameters
----------
zlp: {None, number, Signal1D}
ZLP intensity. It is optional (can be None) if `t` is None and `n`
is not None and the thickness estimation is not required. If `t`
is not None, the ZLP is required to perform the normalization and
if `t` is not None, the ZLP is required to calculate the thickness.
If the ZLP is the same for all spectra, the integral of the ZLP
can be provided as a number. Otherwise, if the ZLP intensity is not
the same for all spectra, it can be provided as i) a Signal1D
of the same dimensions as the current signal containing the ZLP
spectra for each location ii) a BaseSignal of signal dimension 0
and navigation_dimension equal to the current signal containing the
integrated ZLP intensity.
iterations: int
Number of the iterations for the internal loop to remove the
surface plasmon contribution. If 1 the surface plasmon contribution
is not estimated and subtracted (the default is 1).
n: {None, float}
The medium refractive index. Used for normalization of the
SSD to obtain the energy loss function. If given the thickness
is estimated and returned. It is only required when `t` is None.
t: {None, number, Signal1D}
The sample thickness in nm. Used for normalization of the
SSD to obtain the energy loss function. It is only required when
`n` is None. If the thickness is the same for all spectra it can be
given by a number. Otherwise, it can be provided as a BaseSignal
with signal dimension 0 and navigation_dimension equal to the
current signal.
delta : float
A small number (0.1-0.5 eV) added to the energy axis in
specific steps of the calculation the surface loss correction to
improve stability.
full_output : bool
If True, return a dictionary that contains the estimated
thickness if `t` is None and the estimated surface plasmon
excitation and the spectrum corrected from surface plasmon
excitations if `iterations` > 1.
Returns
-------
eps: DielectricFunction instance
The complex dielectric function results,
$\epsilon = \epsilon_1 + i*\epsilon_2$,
contained in an DielectricFunction instance.
output: Dictionary (optional)
A dictionary of optional outputs with the following keys:
``thickness``
The estimated thickness in nm calculated by normalization of
the SSD (only when `t` is None)
``surface plasmon estimation``
The estimated surface plasmon excitation (only if
`iterations` > 1.)
Raises
------
ValuerError
If both `n` and `t` are undefined (None).
AttribureError
If the beam_energy or the collection semi-angle are not defined in
metadata.
Notes
-----
This method is based in Egerton's Matlab code [Egerton2011]_ with some
minor differences:
* The integrals are performed using the simpsom rule instead of using
a summation.
* The wrap-around problem when computing the ffts is workarounded by
padding the signal instead of substracting the reflected tail.
.. [Egerton2011] Ray Egerton, "Electron Energy-Loss
Spectroscopy in the Electron Microscope", Springer-Verlag, 2011.
"""
output = {}
if iterations == 1:
# In this case s.data is not modified so there is no need to make
# a deep copy.
s = self.isig[0.0:]
else:
s = self.isig[0.0:].deepcopy()
sorig = self.isig[0.0:]
# Avoid singularity at 0
if s.axes_manager.signal_axes[0].axis[0] == 0:
s = s.isig[1:]
sorig = self.isig[1:]
# Constants and units
me = constants.value("electron mass energy equivalent in MeV") * 1e3 # keV
# Mapped parameters
try:
e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
except:
raise AttributeError(
"Please define the beam energy."
"You can do this e.g. by using the "
"set_microscope_parameters method"
)
try:
beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle
except:
raise AttributeError(
"Please define the collection semi-angle. "
"You can do this e.g. by using the "
"set_microscope_parameters method"
)
axis = s.axes_manager.signal_axes[0]
eaxis = axis.axis.copy()
if isinstance(zlp, hyperspy.signal.BaseSignal):
if (
zlp.axes_manager.navigation_dimension
== self.axes_manager.navigation_dimension
):
if zlp.axes_manager.signal_dimension == 0:
i0 = zlp.data
else:
i0 = zlp.integrate1D(axis.index_in_axes_manager).data
else:
raise ValueError(
"The ZLP signal dimensions are not "
"compatible with the dimensions of the "
"low-loss signal"
)
# The following prevents errors if the signal is a single spectrum
if len(i0) != 1:
i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))
elif isinstance(zlp, numbers.Number):
i0 = zlp
else:
raise ValueError(
"The zero-loss peak input is not valid, it must be\
in the BaseSignal class or a Number."
)
if isinstance(t, hyperspy.signal.BaseSignal):
if (
t.axes_manager.navigation_dimension
== self.axes_manager.navigation_dimension
) and (t.axes_manager.signal_dimension == 0):
t = t.data
t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))
else:
raise ValueError(
"The thickness signal dimensions are not "
"compatible with the dimensions of the "
"low-loss signal"
)
elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):
raise ValueError(
"thickness must be a HyperSpy signal or a number, not a numpy array."
)
# Slicer to get the signal data from 0 to axis.size
slicer = s.axes_manager._get_data_slice(
[
(axis.index_in_array, slice(None, axis.size)),
]
)
# Kinetic definitions
ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2
tgt = e0 * (2 * me + e0) / (me + e0)
rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)
for io in range(iterations):
# Calculation of the ELF by normalization of the SSD
# Norm(SSD) = Imag(-1/epsilon) (Energy Loss Funtion, ELF)
# We start by the "angular corrections"
Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale
if n is None and t is None:
raise ValueError(
"The thickness and the refractive index are "
"not defined. Please provide one of them."
)
elif n is not None and t is not None:
raise ValueError(
"Please provide the refractive index OR the "
"thickness information, not both"
)
elif n is not None:
# normalize using the refractive index.
K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
K = (K / (np.pi / 2) / (1 - 1.0 / n**2)).reshape(
np.insert(K.shape, axis.index_in_array, 1)
)
# Calculate the thickness only if possible and required
if zlp is not None and (full_output is True or iterations > 1):
te = 332.5 * K * ke / i0
if full_output is True:
output["thickness"] = te
elif t is not None:
if zlp is None:
raise ValueError(
"The ZLP must be provided when the "
"thickness is used for normalization."
)
# normalize using the thickness
K = t * i0 / (332.5 * ke)
te = t
Im = Im / K
# Kramers Kronig Transform:
# We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT
# Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490
# Use a size that is a power of two to speed up the fft and
# make it double the closest upper value to workaround the
# wrap-around problem.
esize = 2 * closest_power_of_two(axis.size)
q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize
q[slicer] *= -1
q = np.fft.fft(q, axis=axis.index_in_array)
# Final touch, we have Re(1/eps)
Re = q[slicer].real + 1
# Egerton does this to correct the wrap-around problem, but in our
# case this is not necessary because we compute the fft on an
# extended and padded spectrum to avoid this problem.
# Re=real(q)
# Tail correction
# vm=Re[axis.size-1]
# Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /
# (axis.size*2-arange(0,axis.size-1)))**2)
# Re[axis.size:]=1+(0.5*vm*((axis.size-1) /
# (axis.size+arange(0,axis.size)))**2)
# Epsilon appears:
# We calculate the real and imaginary parts of the CDF
e1 = Re / (Re**2 + Im**2)
e2 = Im / (Re**2 + Im**2)
if iterations > 1 and zlp is not None:
# Surface losses correction:
# Calculates the surface ELF from a vaccumm border effect
# A simulated surface plasmon is subtracted from the ELF
Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im
adep = tgt / (eaxis + delta) * np.arctan(
beta * tgt / axis.axis
) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2)
Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale
s.data = sorig.data - Srfint
_logger.debug("Iteration number: %d / %d", io + 1, iterations)
if iterations == io + 1 and full_output is True:
sp = sorig._deepcopy_with_new_data(Srfint)
sp.metadata.General.title += " estimated surface plasmon excitation."
output["surface plasmon estimation"] = sp
del sp
del Srfint
eps = s._deepcopy_with_new_data(e1 + e2 * 1j)
del s
eps.set_signal_type("DielectricFunction")
eps.metadata.General.title = (
self.metadata.General.title + "dielectric function "
"(from Kramers-Kronig analysis)"
)
if eps.tmp_parameters.has_item("filename"):
eps.tmp_parameters.filename = (
self.tmp_parameters.filename + "_CDF_after_Kramers_Kronig_transform"
)
if "thickness" in output:
thickness = eps._get_navigation_signal(
data=te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])]
)
thickness.metadata.General.title = (
self.metadata.General.title + " thickness "
"(calculated using Kramers-Kronig analysis)"
)
output["thickness"] = thickness
if full_output is False:
return eps
else:
return eps, output
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get("mask", None)
ref = kwargs.get("ref", None)
interpolate = kwargs.get("interpolate", True)
ip = kwargs.get("ip", 5)
data_slice = kwargs.get("data_slice", slice(None))
if bool(mask):
return np.float32(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, "full")) - len(ref) + 1
|
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get("mask", None)
ref = kwargs.get("ref", None)
interpolate = kwargs.get("interpolate", True)
ip = kwargs.get("ip", 5)
data_slice = kwargs.get("data_slice", slice(None))
if bool(mask):
return np.nan
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
return np.argmax(np.correlate(ref, data, "full")) - len(ref) + 1
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def _shift1D(data, **kwargs):
shift = kwargs.get("shift", 0.0)
original_axis = kwargs.get("original_axis", None)
fill_value = kwargs.get("fill_value", np.nan)
kind = kwargs.get("kind", "linear")
offset = kwargs.get("offset", 0.0)
scale = kwargs.get("scale", 1.0)
size = kwargs.get("size", 2)
if np.isnan(shift) or shift == 0:
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(
original_axis, data, bounds_error=False, fill_value=fill_value, kind=kind
)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
|
def _shift1D(data, **kwargs):
shift = kwargs.get("shift", 0.0)
original_axis = kwargs.get("original_axis", None)
fill_value = kwargs.get("fill_value", np.nan)
kind = kwargs.get("kind", "linear")
offset = kwargs.get("offset", 0.0)
scale = kwargs.get("scale", 1.0)
size = kwargs.get("size", 2)
if np.isnan(shift):
return data
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(
original_axis, data, bounds_error=False, fill_value=fill_value, kind=kind
)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
parallel=None,
show_progressbar=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : {int | float | None}
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as eference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : BaseSignal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
parallel : {None, bool}
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Returns
-------
An array with the result of the estimation in the axis units.
Raises
------
SignalDimensionError if the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (("mask", mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
shift_array = shift_array.compute()
return shift_array
|
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
parallel=None,
show_progressbar=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : {int | float | None}
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
as eference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : BaseSignal of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
parallel : {None, bool}
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Returns
-------
An array with the result of the estimation in the axis units.
Raises
------
SignalDimensionError if the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (("mask", mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
mask=None,
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
return shift_array
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def fetch(self):
"""Fetch the stored value and std attributes.
See Also
--------
store_current_value_in_array, assign_current_value_to_all
"""
indices = self._axes_manager.indices[::-1]
# If it is a single spectrum indices is ()
if not indices:
indices = (0,)
if self.map["is_set"][indices]:
value = self.map["values"][indices]
std = self.map["std"][indices]
if isinstance(value, dArray):
value = value.compute()
if isinstance(std, dArray):
std = std.compute()
self.value = value
self.std = std
|
def fetch(self):
"""Fetch the stored value and std attributes.
See Also
--------
store_current_value_in_array, assign_current_value_to_all
"""
indices = self._axes_manager.indices[::-1]
# If it is a single spectrum indices is ()
if not indices:
indices = (0,)
if self.map["is_set"][indices]:
self.value = self.map["values"][indices]
self.std = self.map["std"][indices]
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def reconstruct_object(flags, value):
"""Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
if not isinstance(flags, list):
flags = parse_flag_string(flags)
if "sig" in flags:
if isinstance(value, dict):
from hyperspy.signal import BaseSignal
value = BaseSignal(**value)
value._assign_subclass()
return value
if "fn" in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [True, "True", b"True"]:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
if isinstance(value, Array):
value = value.compute()
return value
|
def reconstruct_object(flags, value):
"""Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
if not isinstance(flags, list):
flags = parse_flag_string(flags)
if "sig" in flags:
if isinstance(value, dict):
from hyperspy.signal import BaseSignal
value = BaseSignal(**value)
value._assign_subclass()
return value
if "fn" in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [True, "True", b"True"]:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
return value
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def _slice_target(target, dims, both_slices, slice_nav=None, issignal=False):
"""Slices the target if appropriate
Parameters
----------
target : object
Target object
dims : tuple
(navigation_dimensions, signal_dimensions) of the original object that
is sliced
both_slices : tuple
(original_slices, array_slices) of the operation that is performed
slice_nav : {bool, None}
if None, target is returned as-is. Otherwise navigation and signal
dimensions are sliced for True and False values respectively.
issignal : bool
if the target is signal and should be sliced as one
"""
if slice_nav is None:
return target
if target is None:
return None
nav_dims, sig_dims = dims
slices, array_slices = both_slices
if slice_nav is True: # check explicitly for safety
if issignal:
return target.inav[slices]
sl = tuple(array_slices[:nav_dims])
if isinstance(target, np.ndarray):
return np.atleast_1d(target[sl])
if isinstance(target, dArray):
return target[sl]
raise ValueError(
"tried to slice with navigation dimensions, but was neither a "
"signal nor an array"
)
if slice_nav is False: # check explicitly
if issignal:
return target.isig[slices]
sl = tuple(array_slices[-sig_dims:])
if isinstance(target, np.ndarray):
return np.atleast_1d(target[sl])
if isinstance(target, dArray):
return target[sl]
raise ValueError(
"tried to slice with navigation dimensions, but was neither a "
"signal nor an array"
)
|
def _slice_target(target, dims, both_slices, slice_nav=None, issignal=False):
"""Slices the target if appropriate
Parameters
----------
target : object
Target object
dims : tuple
(navigation_dimensions, signal_dimensions) of the original object that
is sliced
both_slices : tuple
(original_slices, array_slices) of the operation that is performed
slice_nav : {bool, None}
if None, target is returned as-is. Otherwise navigation and signal
dimensions are sliced for True and False values respectively.
issignal : bool
if the target is signal and should be sliced as one
"""
if slice_nav is None:
return target
if target is None:
return None
nav_dims, sig_dims = dims
slices, array_slices = both_slices
if slice_nav is True: # check explicitly for safety
if issignal:
return target.inav[slices]
if isinstance(target, np.ndarray):
return np.atleast_1d(target[tuple(array_slices[:nav_dims])])
raise ValueError(
"tried to slice with navigation dimensions, but was neither a "
"signal nor an array"
)
if slice_nav is False: # check explicitly
if issignal:
return target.isig[slices]
if isinstance(target, np.ndarray):
return np.atleast_1d(target[tuple(array_slices[-sig_dims:])])
raise ValueError(
"tried to slice with navigation dimensions, but was neither a "
"signal nor an array"
)
|
https://github.com/hyperspy/hyperspy/issues/1592
|
s
<LazyEELSSpectrum, title: EELS Spectrum Image 0eV, dimensions: (3, 3|2048)>
s.align_zero_loss_peak(True) # Or False
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-4-8925d1fde6ab> in <module>()
----> 1 s.align_zero_loss_peak(True) # or False
c:\users\thomasaar\documents\hyperspy\hyperspy\_signals\eels.py in align_zero_loss_peak(self, calibrate, also_align, print_stats, subpixel, mask, signal_range, show_progressbar, **kwargs)
266
267 zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
--> 268 mean_ = without_nans(zlpc.data).mean()
269 if print_stats is True:
270 print()
c:\users\thomasaar\documents\hyperspy\hyperspy\misc\utils.py in without_nans(data)
756
757 def without_nans(data):
--> 758 return data[~np.isnan(data)]
759
760
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\core.py in __getitem__(self, index)
1239 return self
1240
-> 1241 dsk, chunks = slice_array(out, self.name, self.chunks, index)
1242
1243 dsk2 = sharedict.merge(self.dask, (out, dsk))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_array(out_name, in_name, blockdims, index)
137
138 # Pass down to next function
--> 139 dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)
140
141 bd_out = tuple(map(tuple, bd_out))
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_with_newaxes(out_name, in_name, blockdims, index)
159
160 # Pass down and do work
--> 161 dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)
162
163 if where_none:
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in slice_wrap_lists(out_name, in_name, blockdims, index)
200 raise IndexError("Too many indices for array")
201 for bd, i in zip(blockdims, index):
--> 202 check_index(i, sum(bd))
203
204 # Change indices like -1 to 9
C:\Users\thomasaar\AppData\Local\Continuum\Anaconda3\lib\site-packages\dask\array\slicing.py in check_index(ind, dimension)
711 x = np.array(ind)
712 if (x >= dimension).any() or (x <= -dimension).any():
--> 713 raise IndexError("Index out of bounds %s" % dimension)
714 elif isinstance(ind, slice):
715 return
IndexError: Index out of bounds 200
|
IndexError
|
def __init__(self, xml_str, indexes, instrument=None):
root = ET.fromstring(xml_str)
root = root.find("./ClassInstance[@Type='TRTSpectrumDatabase']")
try:
self.name = str(root.attrib["Name"])
except KeyError:
self.name = "Undefinded"
_logger.info("hypermap have no name. Giving it 'Undefined' name")
hd = root.find("./Header")
dt = datetime.strptime(
" ".join([str(hd.find("./Date").text), str(hd.find("./Time").text)]),
"%d.%m.%Y %H:%M:%S",
)
self.date = dt.date().isoformat()
self.time = dt.time().isoformat()
self.version = int(hd.find("./FileVersion").text)
# fill the sem and stage attributes:
self._set_microscope(root)
self._get_mode(instrument)
self._set_images(root)
self.elements = {}
self._set_elements(root)
self.line_counter = interpret(root.find("./LineCounter").text)
self.channel_count = int(root.find("./ChCount").text)
self.mapping_count = int(root.find("./DetectorCount").text)
# self.channel_factors = {}
self.spectra_data = {}
self._set_sum_edx(root, indexes)
|
def __init__(self, xml_str, instrument=None):
root = ET.fromstring(xml_str)
root = root.find("./ClassInstance[@Type='TRTSpectrumDatabase']")
try:
self.name = str(root.attrib["Name"])
except KeyError:
self.name = "Undefinded"
_logger.info("hypermap have no name. Giving it 'Undefined' name")
hd = root.find("./Header")
dt = datetime.strptime(
" ".join([str(hd.find("./Date").text), str(hd.find("./Time").text)]),
"%d.%m.%Y %H:%M:%S",
)
self.date = dt.date().isoformat()
self.time = dt.time().isoformat()
self.version = int(hd.find("./FileVersion").text)
# fill the sem and stage attributes:
self._set_microscope(root)
self._get_mode(instrument)
self._set_images(root)
self.elements = {}
self._set_elements(root)
self.line_counter = interpret(root.find("./LineCounter").text)
self.channel_count = int(root.find("./ChCount").text)
self.mapping_count = int(root.find("./DetectorCount").text)
# self.channel_factors = {}
self.spectra_data = {}
self._set_sum_edx(root)
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def _set_sum_edx(self, root, indexes):
for i in indexes:
spec_node = root.find("./SpectrumData{0}/ClassInstance".format(str(i)))
self.spectra_data[i] = EDXSpectrum(spec_node)
|
def _set_sum_edx(self, root):
for i in range(self.mapping_count):
spec_node = root.find("./SpectrumData{0}/ClassInstance".format(str(i)))
self.spectra_data[i] = EDXSpectrum(spec_node)
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def __init__(self, filename, instrument=None):
SFS_reader.__init__(self, filename)
header_file = self.get_file("EDSDatabase/HeaderData")
self.available_indexes = []
for i in self.vfs["EDSDatabase"].keys():
if "SpectrumData" in i:
self.available_indexes.append(int(i[-1]))
self.def_index = min(self.available_indexes)
header_byte_str = header_file.get_as_BytesIO_string().getvalue()
self.header = HyperHeader(
header_byte_str, self.available_indexes, instrument=instrument
)
self.hypermap = {}
|
def __init__(self, filename, instrument=None):
SFS_reader.__init__(self, filename)
header_file = self.get_file("EDSDatabase/HeaderData")
header_byte_str = header_file.get_as_BytesIO_string().getvalue()
self.header = HyperHeader(header_byte_str, instrument=instrument)
self.hypermap = {}
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def persistent_parse_hypermap(
self, index=None, downsample=None, cutoff_at_kV=None, lazy=False
):
"""Parse and assign the hypermap to the HyperMap instance.
Arguments:
index -- index of hypermap in bcf if v2 (default 0)
downsample -- downsampling factor of hypermap (default None)
cutoff_at_kV -- low pass cutoff value at keV (default None)
Method does not return anything, it adds the HyperMap instance to
self.hypermap dictionary.
See also:
HyperMap, parse_hypermap
"""
if index is None:
index = self.def_index
dwn = downsample
hypermap = self.parse_hypermap(
index=index, downsample=dwn, cutoff_at_kV=cutoff_at_kV, lazy=lazy
)
self.hypermap[index] = HyperMap(hypermap, self, index=index, downsample=dwn)
|
def persistent_parse_hypermap(
self, index=0, downsample=None, cutoff_at_kV=None, lazy=False
):
"""Parse and assign the hypermap to the HyperMap instance.
Arguments:
index -- index of hypermap in bcf if v2 (default 0)
downsample -- downsampling factor of hypermap (default None)
cutoff_at_kV -- low pass cutoff value at keV (default None)
Method does not return anything, it adds the HyperMap instance to
self.hypermap dictionary.
See also:
HyperMap, parse_hypermap
"""
dwn = downsample
hypermap = self.parse_hypermap(
index=index, downsample=dwn, cutoff_at_kV=cutoff_at_kV, lazy=lazy
)
self.hypermap[index] = HyperMap(hypermap, self, index=index, downsample=dwn)
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def parse_hypermap(self, index=None, downsample=1, cutoff_at_kV=None, lazy=False):
"""Unpack the Delphi/Bruker binary spectral map and return
numpy array in memory efficient way.
Pure python/numpy implementation -- slow, or
cython/memoryview/numpy implimentation if compilied and present
(fast) is used.
Arguments:
index -- the index of hypermap in bcf if there is more than one
hyper map in file.
downsample -- downsampling factor (integer). Diferently than
block_reduce from skimage.measure, the parser populates
reduced array by suming results of pixels, thus having lower
memory requiriments. (default 1)
cutoff_at_kV -- value in keV to truncate the array at. Helps reducing
size of array. (default None)
lazy -- return dask.array (True) or numpy.array (False) (default False)
Returns:
numpy or dask array of bruker hypermap, with (y,x,E) shape.
"""
if index is None:
index = self.def_index
if type(cutoff_at_kV) in (int, float):
eds = self.header.get_spectra_metadata(index)
cutoff_chan = eds.energy_to_channel(cutoff_at_kV)
else:
cutoff_chan = None
if fast_unbcf:
fh = dd(self.get_file)("EDSDatabase/SpectrumData" + str(index)) # noqa
value = dd(unbcf_fast.parse_to_numpy)(
fh, # noqa
downsample=downsample,
cutoff=cutoff_chan,
description=False,
index=index,
)
if lazy:
shape, dtype = unbcf_fast.parse_to_numpy(
fh.compute(),
downsample=downsample,
cutoff=cutoff_chan,
description=True,
index=index,
)
res = da.from_delayed(value, shape=shape, dtype=dtype)
else:
res = value.compute()
return res
else:
value = dd(self.py_parse_hypermap)(
index=index,
downsample=downsample,
cutoff_at_channel=cutoff_chan,
description=False,
)
if lazy:
shape, dtype = self.py_parse_hypermap(
index=index,
downsample=downsample,
cutoff_at_channel=cutoff_chan,
description=True,
)
res = da.from_delayed(value, shape=shape, dtype=dtype)
else:
res = value.compute()
return res
|
def parse_hypermap(self, index=0, downsample=1, cutoff_at_kV=None, lazy=False):
"""Unpack the Delphi/Bruker binary spectral map and return
numpy array in memory efficient way.
Pure python/numpy implimentation -- slow, or
cython/memoryview/numpy implimentation if compilied and present
(fast) is used.
Arguments:
index -- the index of hypermap in bcf if there is more than one
hyper map in file.
downsample -- downsampling factor (integer). Diferently than
block_reduce from skimage.measure, the parser populates
reduced array by suming results of pixels, thus having lower
memory requiriments. (default 1)
cutoff_at_kV -- value in keV to truncate the array at. Helps reducing
size of array. (default None)
lazy -- return dask.array (True) or numpy.array (False) (default False)
Returns:
numpy or dask array of bruker hypermap, with (y,x,E) shape.
"""
if type(cutoff_at_kV) in (int, float):
eds = self.header.get_spectra_metadata()
cutoff_chan = eds.energy_to_channel(cutoff_at_kV)
else:
cutoff_chan = None
if fast_unbcf:
fh = dd(self.get_file)("EDSDatabase/SpectrumData" + str(index)) # noqa
value = dd(unbcf_fast.parse_to_numpy)(
fh, # noqa
downsample=downsample,
cutoff=cutoff_chan,
description=False,
)
if lazy:
shape, dtype = unbcf_fast.parse_to_numpy(
fh.compute(),
downsample=downsample,
cutoff=cutoff_chan,
description=True,
)
res = da.from_delayed(value, shape=shape, dtype=dtype)
else:
res = value.compute()
return res
else:
value = dd(self.py_parse_hypermap)(
index=0,
downsample=downsample,
cutoff_at_channel=cutoff_chan,
description=False,
)
if lazy:
shape, dtype = self.py_parse_hypermap(
index=0,
downsample=downsample,
cutoff_at_channel=cutoff_chan,
description=True,
)
res = da.from_delayed(value, shape=shape, dtype=dtype)
else:
res = value.compute()
return res
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def py_parse_hypermap(
self,
index=None,
downsample=1,
cutoff_at_channel=None, # noqa
description=False,
):
"""Unpack the Delphi/Bruker binary spectral map and return
numpy array in memory efficient way using pure python implementation.
(Slow!)
The function is long and complicated because Delphi/Bruker array
packing is complicated. Whole parsing is done in one function/method
to reduce overhead from python function calls. For cleaner parsing
logic check out fast cython implementation at
hyperspy/io_plugins/unbcf_fast.pyx
The method is only meant to be used if for some
reason c (generated with cython) version of the parser is not compiled.
Arguments:
---------
index -- the index of hypermap in bcf if there is more than one
hyper map in file.
downsample -- downsampling factor (integer). Diferently than
block_reduce from skimage.measure, the parser populates
reduced array by suming results of pixels, thus having lower
memory requiriments. (default 1)
cutoff_at_kV -- value in keV to truncate the array at. Helps reducing
size of array. (default None)
Returns:
---------
numpy array of bruker hypermap, with (y,x,E) shape.
"""
if index is None:
index = self.def_index
# dict of nibbles to struct notation for reading:
st = {1: "B", 2: "B", 4: "H", 8: "I", 16: "Q"}
spectrum_file = self.get_file("EDSDatabase/SpectrumData" + str(index))
iter_data, size_chnk = spectrum_file.get_iter_and_properties()[:2]
if isinstance(cutoff_at_channel, int):
max_chan = cutoff_at_channel
else:
max_chan = self.header.estimate_map_channels(index=index)
depth = self.header.estimate_map_depth(
index=index, downsample=downsample, for_numpy=True
)
buffer1 = next(iter_data)
height, width = strct_unp("<ii", buffer1[:8])
dwn_factor = downsample
shape = (-(-height // dwn_factor), -(-width // dwn_factor), max_chan)
if description:
return shape, depth
# hyper map as very flat array:
vfa = np.zeros(shape[0] * shape[1] * shape[2], dtype=depth)
offset = 0x1A0
size = size_chnk
for line_cnt in range(height):
if (offset + 4) >= size:
size = size_chnk + size - offset
buffer1 = buffer1[offset:] + next(iter_data)
offset = 0
line_head = strct_unp("<i", buffer1[offset : offset + 4])[0]
offset += 4
for dummy1 in range(line_head):
if (offset + 22) >= size:
size = size_chnk + size - offset
buffer1 = buffer1[offset:] + next(iter_data)
offset = 0
# the pixel header contains such information:
# x index of pixel (uint32);
# number of channels for whole mapping (unit16);
# number of channels for pixel (uint16);
# dummy placehollder (same value in every known bcf) (32bit);
# flag distinguishing packing data type (16bit):
# 0 - 16bit packed pulses, 1 - 12bit packed pulses,
# >1 - instructively packed spectra;
# value which sometimes shows the size of packed data (uint16);
# number of pulses if pulse data are present (uint16) or
# additional pulses to the instructively packed data;
# packed data size (32bit) (without additional pulses)
# next header is after that amount of bytes;
x_pix, chan1, chan2, dummy1, flag, dummy_size1, n_of_pulses, data_size2 = (
strct_unp("<IHHIHHHI", buffer1[offset : offset + 22])
)
pix_idx = (x_pix // dwn_factor) + (
(-(-width // dwn_factor)) * (line_cnt // dwn_factor)
)
offset += 22
if (offset + data_size2) >= size:
buffer1 = buffer1[offset:] + next(iter_data)
size = size_chnk + size - offset
offset = 0
if flag == 0:
data1 = buffer1[offset : offset + data_size2]
arr16 = np.fromstring(data1, dtype=np.uint16)
pixel = np.bincount(arr16, minlength=chan1 - 1)
offset += data_size2
elif flag == 1: # and (chan1 != chan2)
# Unpack packed 12-bit data to 16-bit uints:
data1 = buffer1[offset : offset + data_size2]
switched_i2 = np.fromstring(data1, dtype="<u2").byteswap(True)
data2 = np.fromstring(switched_i2.tostring(), dtype=np.uint8).repeat(2)
mask = np.ones_like(data2, dtype=bool)
mask[0::6] = mask[5::6] = False
# Reinterpret expanded as 16-bit:
# string representation of array after switch will have
# always BE independently from endianess of machine
exp16 = np.fromstring(
data2[mask].tostring(), dtype=">u2", count=n_of_pulses
)
exp16[0::2] >>= 4 # Shift every second short by 4
exp16 &= np.uint16(0x0FFF) # Mask all shorts to 12bit
pixel = np.bincount(exp16, minlength=chan1 - 1)
offset += data_size2
else: # flag > 1
# Unpack instructively packed data to pixel channels:
pixel = []
the_end = offset + data_size2 - 4
while offset < the_end:
# this would work on py3
# size_p, channels = buffer1[offset:offset + 2]
# this is needed on py2:
size_p, channels = strct_unp("<BB", buffer1[offset : offset + 2])
offset += 2
if size_p == 0:
pixel += channels * [0]
else:
gain = strct_unp(
"<" + st[size_p * 2], buffer1[offset : offset + size_p]
)[0]
offset += size_p
if size_p == 1:
# special case with nibble switching
length = -(-channels // 2) # integer roof
# valid py3 code
# a = list(buffer1[offset:offset + length])
# this have to be used on py2:
a = strct_unp(
"<" + "B" * length, buffer1[offset : offset + length]
)
g = []
for i in a:
g += (i & 0x0F) + gain, (i >> 4) + gain
pixel += g[:channels]
else:
length = int(channels * size_p / 2)
temp = strct_unp(
"<" + channels * st[size_p],
buffer1[offset : offset + length],
)
pixel += [l + gain for l in temp]
offset += length
if chan2 < chan1:
rest = chan1 - chan2
pixel += rest * [0]
# additional data size:
if n_of_pulses > 0:
add_s = strct_unp("<I", buffer1[offset : offset + 4])[0]
offset += 4
if (offset + add_s) >= size:
buffer1 = buffer1[offset:] + next(iter_data)
size = size_chnk + size - offset
offset = 0
# the additional pulses:
add_pulses = strct_unp(
"<" + "H" * n_of_pulses, buffer1[offset : offset + add_s]
)
offset += add_s
for i in add_pulses:
pixel[i] += 1
else:
offset += 4
# if no downsampling is needed, or if it is first
# pixel encountered with downsampling on, then
# use assigment, which is ~4 times faster, than inplace add
if max_chan < chan1: # if pixel have more channels than we need
chan1 = max_chan
if dwn_factor == 1:
vfa[max_chan * pix_idx : chan1 + max_chan * pix_idx] = pixel[:chan1]
else:
vfa[max_chan * pix_idx : chan1 + max_chan * pix_idx] += pixel[:chan1]
vfa.resize((-(-height // dwn_factor), -(-width // dwn_factor), max_chan))
# check if array is signed, and convert to unsigned
if str(vfa.dtype)[0] == "i":
new_dtype = "".join(["u", str(vfa.dtype)])
vfa.dtype = new_dtype
return vfa
|
def py_parse_hypermap(
self,
index=0,
downsample=1,
cutoff_at_channel=None, # noqa
description=False,
):
"""Unpack the Delphi/Bruker binary spectral map and return
numpy array in memory efficient way using pure python implementation.
(Slow!)
The function is long and complicated because Delphi/Bruker array
packing is complicated. Whole parsing is done in one function/method
to reduce overhead from python function calls. For cleaner parsing
logic check out fast cython implementation at
hyperspy/io_plugins/unbcf_fast.pyx
The method is only meant to be used if for some
reason c (generated with cython) version of the parser is not compiled.
Arguments:
---------
index -- the index of hypermap in bcf if there is more than one
hyper map in file.
downsample -- downsampling factor (integer). Diferently than
block_reduce from skimage.measure, the parser populates
reduced array by suming results of pixels, thus having lower
memory requiriments. (default 1)
cutoff_at_kV -- value in keV to truncate the array at. Helps reducing
size of array. (default None)
Returns:
---------
numpy array of bruker hypermap, with (y,x,E) shape.
"""
# dict of nibbles to struct notation for reading:
st = {1: "B", 2: "B", 4: "H", 8: "I", 16: "Q"}
spectrum_file = self.get_file("EDSDatabase/SpectrumData" + str(index))
iter_data, size_chnk = spectrum_file.get_iter_and_properties()[:2]
if isinstance(cutoff_at_channel, int):
max_chan = cutoff_at_channel
else:
max_chan = self.header.estimate_map_channels(index=index)
depth = self.header.estimate_map_depth(
index=index, downsample=downsample, for_numpy=True
)
buffer1 = next(iter_data)
height, width = strct_unp("<ii", buffer1[:8])
dwn_factor = downsample
shape = (-(-height // dwn_factor), -(-width // dwn_factor), max_chan)
if description:
return shape, depth
# hyper map as very flat array:
vfa = np.zeros(shape[0] * shape[1] * shape[2], dtype=depth)
offset = 0x1A0
size = size_chnk
for line_cnt in range(height):
if (offset + 4) >= size:
size = size_chnk + size - offset
buffer1 = buffer1[offset:] + next(iter_data)
offset = 0
line_head = strct_unp("<i", buffer1[offset : offset + 4])[0]
offset += 4
for dummy1 in range(line_head):
if (offset + 22) >= size:
size = size_chnk + size - offset
buffer1 = buffer1[offset:] + next(iter_data)
offset = 0
# the pixel header contains such information:
# x index of pixel (uint32);
# number of channels for whole mapping (unit16);
# number of channels for pixel (uint16);
# dummy placehollder (same value in every known bcf) (32bit);
# flag distinguishing packing data type (16bit):
# 0 - 16bit packed pulses, 1 - 12bit packed pulses,
# >1 - instructively packed spectra;
# value which sometimes shows the size of packed data (uint16);
# number of pulses if pulse data are present (uint16) or
# additional pulses to the instructively packed data;
# packed data size (32bit) (without additional pulses)
# next header is after that amount of bytes;
x_pix, chan1, chan2, dummy1, flag, dummy_size1, n_of_pulses, data_size2 = (
strct_unp("<IHHIHHHI", buffer1[offset : offset + 22])
)
pix_idx = (x_pix // dwn_factor) + (
(-(-width // dwn_factor)) * (line_cnt // dwn_factor)
)
offset += 22
if (offset + data_size2) >= size:
buffer1 = buffer1[offset:] + next(iter_data)
size = size_chnk + size - offset
offset = 0
if flag == 0:
data1 = buffer1[offset : offset + data_size2]
arr16 = np.fromstring(data1, dtype=np.uint16)
pixel = np.bincount(arr16, minlength=chan1 - 1)
offset += data_size2
elif flag == 1: # and (chan1 != chan2)
# Unpack packed 12-bit data to 16-bit uints:
data1 = buffer1[offset : offset + data_size2]
switched_i2 = np.fromstring(data1, dtype="<u2").byteswap(True)
data2 = np.fromstring(switched_i2.tostring(), dtype=np.uint8).repeat(2)
mask = np.ones_like(data2, dtype=bool)
mask[0::6] = mask[5::6] = False
# Reinterpret expanded as 16-bit:
# string representation of array after switch will have
# always BE independently from endianess of machine
exp16 = np.fromstring(
data2[mask].tostring(), dtype=">u2", count=n_of_pulses
)
exp16[0::2] >>= 4 # Shift every second short by 4
exp16 &= np.uint16(0x0FFF) # Mask all shorts to 12bit
pixel = np.bincount(exp16, minlength=chan1 - 1)
offset += data_size2
else: # flag > 1
# Unpack instructively packed data to pixel channels:
pixel = []
the_end = offset + data_size2 - 4
while offset < the_end:
# this would work on py3
# size_p, channels = buffer1[offset:offset + 2]
# this is needed on py2:
size_p, channels = strct_unp("<BB", buffer1[offset : offset + 2])
offset += 2
if size_p == 0:
pixel += channels * [0]
else:
gain = strct_unp(
"<" + st[size_p * 2], buffer1[offset : offset + size_p]
)[0]
offset += size_p
if size_p == 1:
# special case with nibble switching
length = -(-channels // 2) # integer roof
# valid py3 code
# a = list(buffer1[offset:offset + length])
# this have to be used on py2:
a = strct_unp(
"<" + "B" * length, buffer1[offset : offset + length]
)
g = []
for i in a:
g += (i & 0x0F) + gain, (i >> 4) + gain
pixel += g[:channels]
else:
length = int(channels * size_p / 2)
temp = strct_unp(
"<" + channels * st[size_p],
buffer1[offset : offset + length],
)
pixel += [l + gain for l in temp]
offset += length
if chan2 < chan1:
rest = chan1 - chan2
pixel += rest * [0]
# additional data size:
if n_of_pulses > 0:
add_s = strct_unp("<I", buffer1[offset : offset + 4])[0]
offset += 4
if (offset + add_s) >= size:
buffer1 = buffer1[offset:] + next(iter_data)
size = size_chnk + size - offset
offset = 0
# the additional pulses:
add_pulses = strct_unp(
"<" + "H" * n_of_pulses, buffer1[offset : offset + add_s]
)
offset += add_s
for i in add_pulses:
pixel[i] += 1
else:
offset += 4
# if no downsampling is needed, or if it is first
# pixel encountered with downsampling on, then
# use assigment, which is ~4 times faster, than inplace add
if max_chan < chan1: # if pixel have more channels than we need
chan1 = max_chan
if dwn_factor == 1:
vfa[max_chan * pix_idx : chan1 + max_chan * pix_idx] = pixel[:chan1]
else:
vfa[max_chan * pix_idx : chan1 + max_chan * pix_idx] += pixel[:chan1]
vfa.resize((-(-height // dwn_factor), -(-width // dwn_factor), max_chan))
# check if array is signed, and convert to unsigned
if str(vfa.dtype)[0] == "i":
new_dtype = "".join(["u", str(vfa.dtype)])
vfa.dtype = new_dtype
return vfa
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def file_reader(
filename,
select_type=None,
index=None,
downsample=1, # noqa
cutoff_at_kV=None,
instrument=None,
lazy=False,
):
"""Reads a bruker bcf file and loads the data into the appropriate class,
then wraps it into appropriate hyperspy required list of dictionaries
used by hyperspy.api.load() method.
Keyword arguments:
select_type -- One of: spectrum, image. If none specified, then function
loads everything, else if specified, loads either just sem imagery,
or just hyper spectral mapping data (default None).
index -- index of dataset in bcf v2 can be None integer and 'all'
(default None); None will select first available mapping if more than one.
'all' will return all maps if more than one present;
integer will return only selected map.
downsample -- the downsample ratio of hyperspectral array (downsampling
height and width only), can be integer from 1 to inf, where '1' means
no downsampling will be applied (default 1).
cutoff_at_kV -- if set (can be int of float >= 0) can be used either, to
crop or enlarge energy range at max values. (default None)
instrument -- str, either 'TEM' or 'SEM'. Default is None.
"""
# objectified bcf file:
obj_bcf = BCF_reader(filename, instrument=instrument)
if select_type == "image":
return bcf_imagery(obj_bcf)
elif select_type == "spectrum":
return bcf_hyperspectra(
obj_bcf,
index=index,
downsample=downsample,
cutoff_at_kV=cutoff_at_kV,
lazy=lazy,
)
else:
return bcf_imagery(obj_bcf) + bcf_hyperspectra(
obj_bcf,
index=index,
downsample=downsample,
cutoff_at_kV=cutoff_at_kV,
lazy=lazy,
)
|
def file_reader(
filename,
select_type=None,
index=0,
downsample=1, # noqa
cutoff_at_kV=None,
instrument=None,
lazy=False,
):
"""Reads a bruker bcf file and loads the data into the appropriate class,
then wraps it into appropriate hyperspy required list of dictionaries
used by hyperspy.api.load() method.
Keyword arguments:
select_type -- One of: spectrum, image. If none specified, then function
loads everything, else if specified, loads either just sem imagery,
or just hyper spectral mapping data. (default None)
index -- index of dataset in bcf v2 (delaut 0)
downsample -- the downsample ratio of hyperspectral array (downsampling
height and width only), can be integer from 1 to inf, where '1' means
no downsampling will be applied (default 1).
cutoff_at_kV -- if set (can be int of float >= 0) can be used either, to
crop or enlarge energy range at max values. (default None)
instrument -- str, either 'TEM' or 'SEM'. Default is None.
"""
# objectified bcf file:
obj_bcf = BCF_reader(filename, instrument=instrument)
if select_type == "image":
return bcf_imagery(obj_bcf)
elif select_type == "spectrum":
return bcf_hyperspectra(
obj_bcf,
index=index,
downsample=downsample,
cutoff_at_kV=cutoff_at_kV,
lazy=lazy,
)
else:
return bcf_imagery(obj_bcf) + bcf_hyperspectra(
obj_bcf,
index=index,
downsample=downsample,
cutoff_at_kV=cutoff_at_kV,
lazy=lazy,
)
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
def bcf_hyperspectra(
obj_bcf,
index=None,
downsample=None,
cutoff_at_kV=None, # noqa
lazy=False,
):
"""Return hyperspy required list of dict with eds
hyperspectra and metadata.
"""
global warn_once
if (fast_unbcf == False) and warn_once:
_logger.warning("""unbcf_fast library is not present...
Parsing BCF with Python-only backend, which is slow... please wait.
If parsing is uncomfortably slow, first install cython, then reinstall hyperspy.
For more information, check the 'Installing HyperSpy' section in the documentation.""")
warn_once = False
if index is None:
indexes = [obj_bcf.def_index]
elif index == "all":
indexes = obj_bcf.available_indexes
else:
indexes = [obj_bcf.check_index_valid(index)]
hyperspectra = []
mode = obj_bcf.header.mode
mapping = get_mapping(mode)
for index in indexes:
obj_bcf.persistent_parse_hypermap(
index=index, downsample=downsample, cutoff_at_kV=cutoff_at_kV, lazy=lazy
)
eds_metadata = obj_bcf.header.get_spectra_metadata(index=index)
hyperspectra.append(
{
"data": obj_bcf.hypermap[index].hypermap,
"axes": [
{
"name": "height",
"size": obj_bcf.hypermap[index].hypermap.shape[0],
"offset": 0,
"scale": obj_bcf.hypermap[index].ycalib,
"units": obj_bcf.header.units,
},
{
"name": "width",
"size": obj_bcf.hypermap[index].hypermap.shape[1],
"offset": 0,
"scale": obj_bcf.hypermap[index].xcalib,
"units": obj_bcf.header.units,
},
{
"name": "Energy",
"size": obj_bcf.hypermap[index].hypermap.shape[2],
"offset": obj_bcf.hypermap[index].calib_abs,
"scale": obj_bcf.hypermap[index].calib_lin,
"units": "keV",
},
],
"metadata":
# where is no way to determine what kind of instrument was used:
# TEM or SEM
{
"Acquisition_instrument": {
mode: obj_bcf.header.get_acq_instrument_dict(
detector=True, index=index
)
},
"General": {
"original_filename": obj_bcf.filename.split("/")[-1],
"title": "EDX",
"date": obj_bcf.header.date,
"time": obj_bcf.header.time,
},
"Sample": {
"name": obj_bcf.header.name,
"elements": sorted(list(obj_bcf.header.elements)),
"xray_lines": sorted(gen_elem_list(obj_bcf.header.elements)),
},
"Signal": {
"signal_type": "EDS_%s" % mode,
"record_by": "spectrum",
"quantity": "X-rays (Counts)",
},
},
"original_metadata": {
"Hardware": eds_metadata.hardware_metadata,
"Detector": eds_metadata.detector_metadata,
"Analysis": eds_metadata.esma_metadata,
"Spectrum": eds_metadata.spectrum_metadata,
"DSP Configuration": obj_bcf.header.dsp_metadata,
"Line counter": obj_bcf.header.line_counter,
"Stage": obj_bcf.header.stage_metadata,
"Microscope": obj_bcf.header.sem_metadata,
},
"mapping": mapping,
}
)
return hyperspectra
|
def bcf_hyperspectra(
obj_bcf,
index=0,
downsample=None,
cutoff_at_kV=None, # noqa
lazy=False,
):
"""Return hyperspy required list of dict with eds
hyperspectra and metadata.
"""
global warn_once
if (fast_unbcf == False) and warn_once:
_logger.warning("""unbcf_fast library is not present...
Parsing BCF with Python-only backend, which is slow... please wait.
If parsing is uncomfortably slow, first install cython, then reinstall hyperspy.
For more information, check the 'Installing HyperSpy' section in the documentation.""")
warn_once = False
obj_bcf.persistent_parse_hypermap(
index=index, downsample=downsample, cutoff_at_kV=cutoff_at_kV, lazy=lazy
)
eds_metadata = obj_bcf.header.get_spectra_metadata(index=index)
mode = obj_bcf.header.mode
mapping = get_mapping(mode)
hyperspectra = [
{
"data": obj_bcf.hypermap[index].hypermap,
"axes": [
{
"name": "height",
"size": obj_bcf.hypermap[index].hypermap.shape[0],
"offset": 0,
"scale": obj_bcf.hypermap[index].ycalib,
"units": obj_bcf.header.units,
},
{
"name": "width",
"size": obj_bcf.hypermap[index].hypermap.shape[1],
"offset": 0,
"scale": obj_bcf.hypermap[index].xcalib,
"units": obj_bcf.header.units,
},
{
"name": "Energy",
"size": obj_bcf.hypermap[index].hypermap.shape[2],
"offset": obj_bcf.hypermap[index].calib_abs,
"scale": obj_bcf.hypermap[index].calib_lin,
"units": "keV",
},
],
"metadata":
# where is no way to determine what kind of instrument was used:
# TEM or SEM
{
"Acquisition_instrument": {
mode: obj_bcf.header.get_acq_instrument_dict(
detector=True, index=index
)
},
"General": {
"original_filename": obj_bcf.filename.split("/")[-1],
"title": "EDX",
"date": obj_bcf.header.date,
"time": obj_bcf.header.time,
},
"Sample": {
"name": obj_bcf.header.name,
"elements": sorted(list(obj_bcf.header.elements)),
"xray_lines": sorted(gen_elem_list(obj_bcf.header.elements)),
},
"Signal": {
"signal_type": "EDS_%s" % mode,
"record_by": "spectrum",
"quantity": "X-rays (Counts)",
},
},
"original_metadata": {
"Hardware": eds_metadata.hardware_metadata,
"Detector": eds_metadata.detector_metadata,
"Analysis": eds_metadata.esma_metadata,
"Spectrum": eds_metadata.spectrum_metadata,
"DSP Configuration": obj_bcf.header.dsp_metadata,
"Line counter": obj_bcf.header.line_counter,
"Stage": obj_bcf.header.stage_metadata,
"Microscope": obj_bcf.header.sem_metadata,
},
"mapping": mapping,
}
]
return hyperspectra
|
https://github.com/hyperspy/hyperspy/issues/1751
|
hs.load(filename)
Traceback (most recent call last):
File "<ipython-input-3-ada763a7934e>", line 1, in <module>
hs.load(filename)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in load
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 243, in <listcomp>
for filename in filenames]
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 286, in load_single_file
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io.py", line 295, in load_with_reader
**kwds)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 1189, in file_reader
obj_bcf = BCF_reader(filename, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 876, in __init__
self.header = HyperHeader(header_byte_str, instrument=instrument)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 585, in __init__
self._set_sum_edx(root)
File "C:\ProgramData\Anaconda3\lib\site-packages\hyperspy\io_plugins\bcf.py", line 735, in _set_sum_edx
str(i))[0].ClassInstance)
IndexError: list index out of range
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.