after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def cat(fname, fallback=_DEFAULT, binary=True):
"""Return file content.
fallback: the value returned in case the file does not exist or
cannot be read
binary: whether to open the file in binary or text mode.
"""
try:
with open_binary(fname) if binary else open_text(fname) as f:
return f.read().strip()
except (IOError, OSError):
if fallback is not _DEFAULT:
return fallback
else:
raise
|
def cat(fname, fallback=_DEFAULT, binary=True):
"""Return file content.
fallback: the value returned in case the file does not exist or
cannot be read
binary: whether to open the file in binary or text mode.
"""
try:
with open_binary(fname) if binary else open_text(fname) as f:
return f.read().strip()
except IOError:
if fallback is not _DEFAULT:
return fallback
else:
raise
|
https://github.com/giampaolo/psutil/issues/1323
|
Traceback (most recent call last):
File "/usr/lib/python-exec/python2.7/s-tui", line 11, in <module>
load_entry_point('s-tui==0.7.5', 'console_scripts', 's-tui')()
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 854, in main
graph_controller = GraphController(args)
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 680, in __init__
self.view = GraphView(self)
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 210, in __init__
self.temp_sensors_menu = TempSensorsMenu(self.on_sensors_menu_close)
File "/usr/lib64/python2.7/site-packages/s_tui/TempSensorsMenu.py", line 70, in __init__
sensors_dict = psutil.sensors_temperatures()
File "/usr/lib64/python2.7/site-packages/psutil/__init__.py", line 2194, in sensors_temperatures
rawdict = _psplatform.sensors_temperatures()
File "/usr/lib64/python2.7/site-packages/psutil/_pslinux.py", line 1150, in sensors_temperatures
current = float(cat(path)) / 1000.0
ValueError: could not convert string to float:
|
ValueError
|
def sensors_temperatures():
"""Return hardware (CPU and others) temperatures as a dict
including hardware name, label, current, max and critical
temperatures.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
- /sys/class/thermal/thermal_zone* is another one but it's more
difficult to parse
"""
ret = collections.defaultdict(list)
basenames = glob.glob("/sys/class/hwmon/hwmon*/temp*_*")
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
# https://github.com/nicolargo/glances/issues/1060
basenames.extend(glob.glob("/sys/class/hwmon/hwmon*/device/temp*_*"))
basenames = sorted(set([x.split("_")[0] for x in basenames]))
for base in basenames:
try:
path = base + "_input"
current = float(cat(path)) / 1000.0
path = os.path.join(os.path.dirname(base), "name")
unit_name = cat(path, binary=False)
except (IOError, OSError, ValueError) as err:
# A lot of things can go wrong here, so let's just skip the
# whole entry. Sure thing is Linux's /sys/class/hwmon really
# is a stinky broken mess.
# https://github.com/giampaolo/psutil/issues/1009
# https://github.com/giampaolo/psutil/issues/1101
# https://github.com/giampaolo/psutil/issues/1129
# https://github.com/giampaolo/psutil/issues/1245
# https://github.com/giampaolo/psutil/issues/1323
warnings.warn("ignoring %r for file %r" % (err, path), RuntimeWarning)
continue
high = cat(base + "_max", fallback=None)
critical = cat(base + "_crit", fallback=None)
label = cat(base + "_label", fallback="", binary=False)
if high is not None:
try:
high = float(high) / 1000.0
except ValueError:
high = None
if critical is not None:
try:
critical = float(critical) / 1000.0
except ValueError:
critical = None
ret[unit_name].append((label, current, high, critical))
return ret
|
def sensors_temperatures():
"""Return hardware (CPU and others) temperatures as a dict
including hardware name, label, current, max and critical
temperatures.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
- /sys/class/thermal/thermal_zone* is another one but it's more
difficult to parse
"""
ret = collections.defaultdict(list)
basenames = glob.glob("/sys/class/hwmon/hwmon*/temp*_*")
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
# https://github.com/nicolargo/glances/issues/1060
basenames.extend(glob.glob("/sys/class/hwmon/hwmon*/device/temp*_*"))
basenames = sorted(set([x.split("_")[0] for x in basenames]))
for base in basenames:
try:
path = base + "_input"
current = float(cat(path)) / 1000.0
path = os.path.join(os.path.dirname(base), "name")
unit_name = cat(path, binary=False)
except (IOError, OSError) as err:
# A lot of things can go wrong here, so let's just skip the
# whole entry.
# https://github.com/giampaolo/psutil/issues/1009
# https://github.com/giampaolo/psutil/issues/1101
# https://github.com/giampaolo/psutil/issues/1129
# https://github.com/giampaolo/psutil/issues/1245
warnings.warn("ignoring %r for file %r" % (err, path), RuntimeWarning)
continue
high = cat(base + "_max", fallback=None)
critical = cat(base + "_crit", fallback=None)
label = cat(base + "_label", fallback="", binary=False)
if high is not None:
high = float(high) / 1000.0
if critical is not None:
critical = float(critical) / 1000.0
ret[unit_name].append((label, current, high, critical))
return ret
|
https://github.com/giampaolo/psutil/issues/1323
|
Traceback (most recent call last):
File "/usr/lib/python-exec/python2.7/s-tui", line 11, in <module>
load_entry_point('s-tui==0.7.5', 'console_scripts', 's-tui')()
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 854, in main
graph_controller = GraphController(args)
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 680, in __init__
self.view = GraphView(self)
File "/usr/lib64/python2.7/site-packages/s_tui/s_tui.py", line 210, in __init__
self.temp_sensors_menu = TempSensorsMenu(self.on_sensors_menu_close)
File "/usr/lib64/python2.7/site-packages/s_tui/TempSensorsMenu.py", line 70, in __init__
sensors_dict = psutil.sensors_temperatures()
File "/usr/lib64/python2.7/site-packages/psutil/__init__.py", line 2194, in sensors_temperatures
rawdict = _psplatform.sensors_temperatures()
File "/usr/lib64/python2.7/site-packages/psutil/_pslinux.py", line 1150, in sensors_temperatures
current = float(cat(path)) / 1000.0
ValueError: could not convert string to float:
|
ValueError
|
def _proc_cred(self):
@wrap_exceptions
def proc_cred(self):
return cext.proc_cred(self.pid, self._procfs_path)
return proc_cred(self)
|
def _proc_cred(self):
return cext.proc_cred(self.pid, self._procfs_path)
|
https://github.com/giampaolo/psutil/issues/1194
|
p = psutil.Process(960)
p.name()
'xntpd'
p.nice()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/venv/lib/python3.5/site-packages/psutil/__init__.py", line 755, in nice
return self._proc.nice_get()
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 346, in wrapper
return fun(self, *args, **kwargs)
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 451, in nice_get
return cext_posix.getpriority(self.pid)
OSError: [Errno 22] Invalid argument
|
OSError
|
def nice_get(self):
# Note #1: getpriority(3) doesn't work for realtime processes.
# Psinfo is what ps uses, see:
# https://github.com/giampaolo/psutil/issues/1194
return self._proc_basic_info()[proc_info_map["nice"]]
|
def nice_get(self):
# Note #1: for some reason getpriority(3) return ESRCH (no such
# process) for certain low-pid processes, no matter what (even
# as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
#
# Note #2: we also can get niceness from /proc/pid/psinfo
# but it's wrong, see:
# https://github.com/giampaolo/psutil/issues/1082
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/1194
|
p = psutil.Process(960)
p.name()
'xntpd'
p.nice()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/venv/lib/python3.5/site-packages/psutil/__init__.py", line 755, in nice
return self._proc.nice_get()
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 346, in wrapper
return fun(self, *args, **kwargs)
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 451, in nice_get
return cext_posix.getpriority(self.pid)
OSError: [Errno 22] Invalid argument
|
OSError
|
def uids(self):
try:
real, effective, saved, _, _, _ = self._proc_cred()
except AccessDenied:
real = self._proc_basic_info()[proc_info_map["uid"]]
effective = self._proc_basic_info()[proc_info_map["euid"]]
saved = None
return _common.puids(real, effective, saved)
|
def uids(self):
real, effective, saved, _, _, _ = self._proc_cred()
return _common.puids(real, effective, saved)
|
https://github.com/giampaolo/psutil/issues/1194
|
p = psutil.Process(960)
p.name()
'xntpd'
p.nice()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/venv/lib/python3.5/site-packages/psutil/__init__.py", line 755, in nice
return self._proc.nice_get()
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 346, in wrapper
return fun(self, *args, **kwargs)
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 451, in nice_get
return cext_posix.getpriority(self.pid)
OSError: [Errno 22] Invalid argument
|
OSError
|
def gids(self):
try:
_, _, _, real, effective, saved = self._proc_cred()
except AccessDenied:
real = self._proc_basic_info()[proc_info_map["gid"]]
effective = self._proc_basic_info()[proc_info_map["egid"]]
saved = None
return _common.puids(real, effective, saved)
|
def gids(self):
_, _, _, real, effective, saved = self._proc_cred()
return _common.puids(real, effective, saved)
|
https://github.com/giampaolo/psutil/issues/1194
|
p = psutil.Process(960)
p.name()
'xntpd'
p.nice()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/venv/lib/python3.5/site-packages/psutil/__init__.py", line 755, in nice
return self._proc.nice_get()
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 346, in wrapper
return fun(self, *args, **kwargs)
File "/path/to/venv/lib/python3.5/site-packages/psutil/_pssunos.py", line 451, in nice_get
return cext_posix.getpriority(self.pid)
OSError: [Errno 22] Invalid argument
|
OSError
|
def sensors_temperatures():
"""Return hardware (CPU and others) temperatures as a dict
including hardware name, label, current, max and critical
temperatures.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
- /sys/class/thermal/thermal_zone* is another one but it's more
difficult to parse
"""
ret = collections.defaultdict(list)
basenames = glob.glob("/sys/class/hwmon/hwmon*/temp*_*")
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
# https://github.com/nicolargo/glances/issues/1060
basenames.extend(glob.glob("/sys/class/hwmon/hwmon*/device/temp*_*"))
basenames = sorted(set([x.split("_")[0] for x in basenames]))
for base in basenames:
try:
current = float(cat(base + "_input")) / 1000.0
unit_name = cat(os.path.join(os.path.dirname(base), "name"), binary=False)
except (IOError, OSError) as err:
# A lot of things can go wrong here, so let's just skip the
# whole entry.
# https://github.com/giampaolo/psutil/issues/1009
# https://github.com/giampaolo/psutil/issues/1101
# https://github.com/giampaolo/psutil/issues/1129
# https://github.com/giampaolo/psutil/issues/1245
warnings.warn("ignoring %r" % err, RuntimeWarning)
continue
high = cat(base + "_max", fallback=None)
critical = cat(base + "_crit", fallback=None)
label = cat(base + "_label", fallback="", binary=False)
if high is not None:
high = float(high) / 1000.0
if critical is not None:
critical = float(critical) / 1000.0
ret[unit_name].append((label, current, high, critical))
return ret
|
def sensors_temperatures():
"""Return hardware (CPU and others) temperatures as a dict
including hardware name, label, current, max and critical
temperatures.
Implementation notes:
- /sys/class/hwmon looks like the most recent interface to
retrieve this info, and this implementation relies on it
only (old distros will probably use something else)
- lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
- /sys/class/thermal/thermal_zone* is another one but it's more
difficult to parse
"""
ret = collections.defaultdict(list)
basenames = glob.glob("/sys/class/hwmon/hwmon*/temp*_*")
# CentOS has an intermediate /device directory:
# https://github.com/giampaolo/psutil/issues/971
# https://github.com/nicolargo/glances/issues/1060
basenames.extend(glob.glob("/sys/class/hwmon/hwmon*/device/temp*_*"))
basenames = sorted(set([x.split("_")[0] for x in basenames]))
for base in basenames:
try:
current = float(cat(base + "_input")) / 1000.0
except (IOError, OSError) as err:
# A lot of things can go wrong here, so let's just skip the
# whole entry.
# https://github.com/giampaolo/psutil/issues/1009
# https://github.com/giampaolo/psutil/issues/1101
# https://github.com/giampaolo/psutil/issues/1129
warnings.warn("ignoring %r" % err, RuntimeWarning)
continue
unit_name = cat(os.path.join(os.path.dirname(base), "name"), binary=False)
high = cat(base + "_max", fallback=None)
critical = cat(base + "_crit", fallback=None)
label = cat(base + "_label", fallback="", binary=False)
if high is not None:
high = float(high) / 1000.0
if critical is not None:
critical = float(critical) / 1000.0
ret[unit_name].append((label, current, high, critical))
return ret
|
https://github.com/giampaolo/psutil/issues/1245
|
import psutil
psutil.sensors_temperatures()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python2.7/site-packages/psutil/__init__.py", line 2195, in sensors_temperatures
rawdict = _psplatform.sensors_temperatures()
File "/usr/lib64/python2.7/site-packages/psutil/_pslinux.py", line 1154, in sensors_temperatures
binary=False)
File "/usr/lib64/python2.7/site-packages/psutil/_pslinux.py", line 295, in cat
with open_binary(fname) if binary else open_text(fname) as f:
File "/usr/lib64/python2.7/site-packages/psutil/_pslinux.py", line 201, in open_text
return open(fname, "rt", **kwargs)
IOError: [Errno 2] No such file or directory: '/sys/class/hwmon/hwmon1/device/name'
|
IOError
|
def wait(self, timeout=None):
if timeout is None:
cext_timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
cext_timeout = int(timeout * 1000)
while True:
ret = cext.proc_wait(self.pid, cext_timeout)
if ret == WAIT_TIMEOUT:
raise TimeoutExpired(timeout, self.pid, self._name)
if timeout is None and pid_exists(self.pid):
continue
return ret
|
def wait(self, timeout=None):
if timeout is None:
cext_timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
cext_timeout = int(timeout * 1000)
ret = cext.proc_wait(self.pid, cext_timeout)
if ret == WAIT_TIMEOUT:
raise TimeoutExpired(timeout, self.pid, self._name)
return ret
|
https://github.com/giampaolo/psutil/issues/1098
|
Traceback (most recent call last):
File "c:\projects\psutil\psutil\tests\test_misc.py", line 923, in test_reap_children
reap_children()
File "c:\projects\psutil\psutil\tests\__init__.py", line 438, in reap_children
assert_gone(pid)
File "c:\projects\psutil\psutil\tests\__init__.py", line 388, in assert_gone
assert not psutil.pid_exists(pid), pid
AssertionError: 3028
======================================================================
|
AssertionError
|
def wait(self, timeout=None):
if timeout is None:
cext_timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
cext_timeout = int(timeout * 1000)
while True:
ret = cext.proc_wait(self.pid, cext_timeout)
if ret == WAIT_TIMEOUT:
raise TimeoutExpired(timeout, self.pid, self._name)
if pid_exists(self.pid):
if timeout is None:
continue
else:
raise TimeoutExpired(timeout, self.pid, self._name)
return ret
|
def wait(self, timeout=None):
if timeout is None:
cext_timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds
cext_timeout = int(timeout * 1000)
while True:
ret = cext.proc_wait(self.pid, cext_timeout)
if ret == WAIT_TIMEOUT:
raise TimeoutExpired(timeout, self.pid, self._name)
if timeout is None and pid_exists(self.pid):
continue
return ret
|
https://github.com/giampaolo/psutil/issues/1098
|
Traceback (most recent call last):
File "c:\projects\psutil\psutil\tests\test_misc.py", line 923, in test_reap_children
reap_children()
File "c:\projects\psutil\psutil\tests\__init__.py", line 438, in reap_children
assert_gone(pid)
File "c:\projects\psutil\psutil\tests\__init__.py", line 388, in assert_gone
assert not psutil.pid_exists(pid), pid
AssertionError: 3028
======================================================================
|
AssertionError
|
def nice_get(self):
# Note #1: for some reason getpriority(3) return ESRCH (no such
# process) for certain low-pid processes, no matter what (even
# as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
#
# Note #2: we also can get niceness from /proc/pid/psinfo
# but it's wrong, see:
# https://github.com/giampaolo/psutil/issues/1082
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
|
def nice_get(self):
# For some reason getpriority(3) return ESRCH (no such process)
# for certain low-pid processes, no matter what (even as root).
# The process actually exists though, as it has a name,
# creation time, etc.
# The best thing we can do here appears to be raising AD.
# Note: tested on Solaris 11; on Open Solaris 5 everything is
# fine.
try:
return cext_posix.getpriority(self.pid)
except EnvironmentError as err:
# 48 is 'operation not supported' but errno does not expose
# it. It occurs for low system pids.
if err.errno in (errno.ENOENT, errno.ESRCH, 48):
if pid_exists(self.pid):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/1082
|
======================================================================
FAIL: psutil.tests.test_posix.TestProcess.test_nice
----------------------------------------------------------------------
Traceback (most recent call last):
File "psutil/tests/test_posix.py", line 208, in test_nice
self.assertEqual(ps_nice, psutil_nice)
AssertionError: 20 != 0
|
AssertionError
|
def sensors_battery():
"""Return battery info."""
try:
percent, minsleft, power_plugged = cext.sensors_battery()
except NotImplementedError:
# see: https://github.com/giampaolo/psutil/issues/1074
return None
power_plugged = power_plugged == 1
if power_plugged:
secsleft = _common.POWER_TIME_UNLIMITED
elif minsleft == -1:
secsleft = _common.POWER_TIME_UNKNOWN
else:
secsleft = minsleft * 60
return _common.sbattery(percent, secsleft, power_plugged)
|
def sensors_battery():
"""Return battery info."""
percent, minsleft, power_plugged = cext.sensors_battery()
power_plugged = power_plugged == 1
if power_plugged:
secsleft = _common.POWER_TIME_UNLIMITED
elif minsleft == -1:
secsleft = _common.POWER_TIME_UNKNOWN
else:
secsleft = minsleft * 60
return _common.sbattery(percent, secsleft, power_plugged)
|
https://github.com/giampaolo/psutil/issues/1074
|
#PYTHONPATH=/usr/src/psutil python2 psutil/tests/test_connections.py
Traceback (most recent call last):
File "psutil/tests/test_connections.py", line 30, in <module>
from psutil.tests import AF_UNIX
File "/usr/src/psutil/psutil/tests/__init__.py", line 156, in <module>
HAS_BATTERY = HAS_SENSORS_BATTERY and psutil.sensors_battery()
File "/usr/src/psutil/psutil/__init__.py", line 2316, in sensors_battery
return _psplatform.sensors_battery()
File "/usr/src/psutil/psutil/_psbsd.py", line 411, in sensors_battery
percent, minsleft, power_plugged = cext.sensors_battery()
OSError: [Errno 2] No such file or directory
|
OSError
|
def pid_exists(pid):
"""Check for the existence of a unix PID."""
if not _psposix.pid_exists(pid):
return False
else:
# Linux's apparently does not distinguish between PIDs and TIDs
# (thread IDs).
# listdir("/proc") won't show any TID (only PIDs) but
# os.stat("/proc/{tid}") will succeed if {tid} exists.
# os.kill() can also be passed a TID. This is quite confusing.
# In here we want to enforce this distinction and support PIDs
# only, see:
# https://github.com/giampaolo/psutil/issues/687
try:
# Note: already checked that this is faster than using a
# regular expr. Also (a lot) faster than doing
# 'return pid in pids()'
with open_binary("%s/%s/status" % (get_procfs_path(), pid)) as f:
for line in f:
if line.startswith(b"Tgid:"):
tgid = int(line.split()[1])
# If tgid and pid are the same then we're
# dealing with a process PID.
return tgid == pid
raise ValueError("'Tgid' line not found")
except (EnvironmentError, ValueError):
return pid in pids()
|
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
|
https://github.com/giampaolo/psutil/issues/687
|
Traceback (most recent call last):
File "/home/giampaolo/svn/psutil/test/test_psutil.py", line 738, in test_pid_exists_range
assert not os.path.exists('/proc/%s' % pid), pid
AssertionError: 947
|
AssertionError
|
def swap_memory():
"""System swap memory as (total, used, free, sin, sout) namedtuple."""
total, used, free, sin, sout = cext.swap_mem()
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, sin, sout)
|
def swap_memory():
"""System swap memory as (total, used, free, sin, sout) namedtuple."""
pagesize = 1 if OPENBSD else PAGESIZE
total, used, free, sin, sout = [x * pagesize for x in cext.swap_mem()]
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, sin, sout)
|
https://github.com/giampaolo/psutil/issues/918
|
======================================================================
FAIL: test_bsd.NetBSDSpecificTestCase.test_swapmem_free
----------------------------------------------------------------------
Traceback (most recent call last):
File "/vagrant/psutil/psutil/tests/test_bsd.py", line 401, in test_swapmem_free
psutil.swap_memory().free, self.parse_meminfo("SwapFree:"))
AssertionError: 2197127430144L != 536403968
======================================================================
FAIL: test_bsd.NetBSDSpecificTestCase.test_swapmem_total
----------------------------------------------------------------------
Traceback (most recent call last):
File "/vagrant/psutil/psutil/tests/test_bsd.py", line 397, in test_swapmem_total
psutil.swap_memory().total, self.parse_meminfo("SwapTotal:"))
AssertionError: 2197127430144L != 536403968
======================================================================
FAIL: test_bsd.NetBSDSpecificTestCase.test_vmem_free
----------------------------------------------------------------------
Traceback (most recent call last):
File "/vagrant/psutil/psutil/tests/test_bsd.py", line 385, in test_vmem_free
psutil.virtual_memory().buffers, self.parse_meminfo("MemFree:"))
AssertionError: 46714880 != 1939644416
======================================================================
FAIL: test_bsd.NetBSDSpecificTestCase.test_vmem_total
----------------------------------------------------------------------
Traceback (most recent call last):
File "/vagrant/psutil/psutil/tests/test_bsd.py", line 381, in test_vmem_total
psutil.virtual_memory().total, self.parse_meminfo("MemTotal:"))
AssertionError: 2147020800L != 2067423232
|
AssertionError
|
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except (OSError, ValueError) as err:
if isinstance(err, ValueError) or err.errno == errno.EINVAL:
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError(
"invalid CPU number %r; choose between %s" % (cpu, allcpus)
)
raise
|
def cpu_affinity_set(self, cpus):
try:
cext.proc_cpu_affinity_set(self.pid, cpus)
except OSError as err:
if err.errno == errno.EINVAL:
allcpus = tuple(range(len(per_cpu_times())))
for cpu in cpus:
if cpu not in allcpus:
raise ValueError(
"invalid CPU #%i (choose between %s)" % (cpu, allcpus)
)
raise
|
https://github.com/giampaolo/psutil/issues/892
|
~/svn/psutil {master}$ python3 -c "import psutil; psutil.Process().cpu_affinity([-1])"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/giampaolo/svn/psutil/psutil/__init__.py", line 764, in cpu_affinity
self._proc.cpu_affinity_set(list(set(cpus)))
File "/home/giampaolo/svn/psutil/psutil/_pslinux.py", line 1067, in wrapper
return fun(self, *args, **kwargs)
File "/home/giampaolo/svn/psutil/psutil/_pslinux.py", line 1423, in cpu_affinity_set
cext.proc_cpu_affinity_set(self.pid, cpus)
SystemError: <built-in function proc_cpu_affinity_set> returned NULL without setting an error
|
SystemError
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
with open_text(file, buffering=BIGGER_FILE_BUFFERING) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
if " " not in line:
# see: https://github.com/giampaolo/psutil/issues/766
continue
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
with open_text(file, buffering=BIGGER_FILE_BUFFERING) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
https://github.com/giampaolo/psutil/issues/766
|
Could not find neovim socket error while parsing /proc/net/unix; malformed line '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n'
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 525, in process_unix
_, _, _, _, type_, _, inode = tokens[0:7]
ValueError: need more than 1 value to unpack
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/teto/.i3/i3dispatch.py", line 115, in get_nvim_socket
unix_sockets = child.connections(kind="unix")
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/__init__.py", line 1010, in connections
return self._proc.connections(kind)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 722, in wrapper
return fun(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 1178, in connections
ret = _connections.retrieve(kind, self.pid)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 568, in retrieve
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 529, in process_unix
file, line))
RuntimeError: error while parsing /proc/net/unix; malformed line '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n'
|
ValueError
|
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith("6") and not os.path.exists(file):
# IPv6 not supported
return
with open_text(file, buffering=BIGGER_FILE_BUFFERING) as f:
f.readline() # skip the first line
for lineno, line in enumerate(f, 1):
try:
_, laddr, raddr, status, _, _, _, _, _, inode = line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %s %r"
% (file, lineno, line)
)
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
try:
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
except _Ipv6UnsupportedError:
continue
yield (fd, family, type_, laddr, raddr, status, pid)
|
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith("6") and not os.path.exists(file):
# IPv6 not supported
return
with open_text(file, buffering=BIGGER_FILE_BUFFERING) as f:
f.readline() # skip the first line
for line in f:
try:
_, laddr, raddr, status, _, _, _, _, _, inode = line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
try:
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
except _Ipv6UnsupportedError:
continue
yield (fd, family, type_, laddr, raddr, status, pid)
|
https://github.com/giampaolo/psutil/issues/766
|
Could not find neovim socket error while parsing /proc/net/unix; malformed line '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n'
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 525, in process_unix
_, _, _, _, type_, _, inode = tokens[0:7]
ValueError: need more than 1 value to unpack
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/teto/.i3/i3dispatch.py", line 115, in get_nvim_socket
unix_sockets = child.connections(kind="unix")
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/__init__.py", line 1010, in connections
return self._proc.connections(kind)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 722, in wrapper
return fun(self, *args, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 1178, in connections
ret = _connections.retrieve(kind, self.pid)
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 568, in retrieve
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
File "/usr/local/lib/python3.4/dist-packages/psutil-3.2.0-py3.4-linux-x86_64.egg/psutil/_pslinux.py", line 529, in process_unix
file, line))
RuntimeError: error while parsing /proc/net/unix; malformed line '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n'
|
ValueError
|
def memory_maps(self):
"""Return process's mapped memory regions as a list of named tuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
with open_text(
"%s/%s/smaps" % (self._procfs_path, self.pid), buffering=BIGGER_FILE_BUFFERING
) as f:
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(":"):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith("VmFlags:"):
# see issue #369
continue
else:
raise ValueError(
"don't know how to interpret line %r" % line
)
yield (current_block.pop(), data)
ls = []
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + [""]
if not path:
path = "[anon]"
else:
path = path.strip()
if path.endswith(" (deleted)") and not path_exists_strict(path):
path = path[:-10]
ls.append(
(
addr,
perms,
path,
data["Rss:"],
data.get("Size:", 0),
data.get("Pss:", 0),
data.get("Shared_Clean:", 0),
data.get("Shared_Dirty:", 0),
data.get("Private_Clean:", 0),
data.get("Private_Dirty:", 0),
data.get("Referenced:", 0),
data.get("Anonymous:", 0),
data.get("Swap:", 0),
)
)
return ls
|
def memory_maps(self):
"""Return process's mapped memory regions as a list of named tuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
with open_text(
"%s/%s/smaps" % (self._procfs_path, self.pid), buffering=BIGGER_FILE_BUFFERING
) as f:
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if not fields[0].endswith(":"):
# new block section
yield (current_block.pop(), data)
current_block.append(line)
else:
try:
data[fields[0]] = int(fields[1]) * 1024
except ValueError:
if fields[0].startswith("VmFlags:"):
# see issue #369
continue
else:
raise ValueError(
"don't know how to interpret line %r" % line
)
yield (current_block.pop(), data)
ls = []
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + [""]
if not path:
path = "[anon]"
else:
path = path.strip()
ls.append(
(
addr,
perms,
path,
data["Rss:"],
data.get("Size:", 0),
data.get("Pss:", 0),
data.get("Shared_Clean:", 0),
data.get("Shared_Dirty:", 0),
data.get("Private_Clean:", 0),
data.get("Private_Dirty:", 0),
data.get("Referenced:", 0),
data.get("Anonymous:", 0),
data.get("Swap:", 0),
)
)
return ls
|
https://github.com/giampaolo/psutil/issues/759
|
======================================================================
FAIL: test_memory_maps (test_process.TestProcess)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/giampaolo/svn/psutil/psutil/tests/test_process.py", line 601, in test_memory_maps
os.path.islink(nt.path), nt.path
AssertionError: /run/shm/sem.SiOnY9 (deleted)
|
AssertionError
|
def get_ethtool_macro():
# see: https://github.com/giampaolo/psutil/issues/659
from distutils.unixccompiler import UnixCCompiler
from distutils.errors import CompileError
with tempfile.NamedTemporaryFile(suffix=".c", delete=False, mode="wt") as f:
f.write("#include <linux/ethtool.h>")
@atexit.register
def on_exit():
try:
os.remove(f.name)
except OSError:
pass
compiler = UnixCCompiler()
try:
with silenced_output("stderr"):
with silenced_output("stdout"):
compiler.compile([f.name])
except CompileError:
return ("PSUTIL_ETHTOOL_MISSING_TYPES", 1)
else:
return None
|
def get_ethtool_macro():
# see: https://github.com/giampaolo/psutil/issues/659
from distutils.unixccompiler import UnixCCompiler
from distutils.errors import CompileError
with tempfile.NamedTemporaryFile(suffix=".c", delete=False, mode="wt") as f:
f.write("#include <linux/ethtool.h>")
atexit.register(os.remove, f.name)
compiler = UnixCCompiler()
try:
with captured_output("stderr"):
with captured_output("stdout"):
compiler.compile([f.name])
except CompileError:
return ("PSUTIL_ETHTOOL_MISSING_TYPES", 1)
else:
return None
|
https://github.com/giampaolo/psutil/issues/677
|
root@ip-10-0-2-186:/home/ubuntu# easy_install psutil==3.2.0
Searching for psutil==3.2.0
Reading https://pypi.python.org/simple/psutil/
Best match: psutil 3.2.0
Downloading https://pypi.python.org/packages/source/p/psutil/psutil-3.2.0.tar.gz#md5=224c2bb432003d74d022ced4409df1bc
Processing psutil-3.2.0.tar.gz
Writing /tmp/easy_install-IRRDCd/psutil-3.2.0/setup.cfg
Running psutil-3.2.0/setup.py -q bdist_egg --dist-dir /tmp/easy_install-IRRDCd/psutil-3.2.0/egg-dist-tmp-3ewURn
Traceback (most recent call last):
File "/usr/bin/easy_install", line 9, in <module>
load_entry_point('setuptools==3.3', 'console_scripts', 'easy_install')()
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 1963, in main
with_ei_usage(lambda:
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 1950, in with_ei_usage
return f()
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 1967, in <lambda>
distclass=DistributionWithoutHelpCommands, **kw
File "/usr/lib/python2.7/distutils/core.py", line 151, in setup
dist.run_commands()
File "/usr/lib/python2.7/distutils/dist.py", line 953, in run_commands
self.run_command(cmd)
File "/usr/lib/python2.7/distutils/dist.py", line 972, in run_command
cmd_obj.run()
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 381, in run
self.easy_install(spec, not self.no_deps)
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 616, in easy_install
return self.install_item(spec, dist.location, tmpdir, deps)
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 646, in install_item
dists = self.install_eggs(spec, download, tmpdir)
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 834, in install_eggs
return self.build_and_install(setup_script, setup_base)
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 1040, in build_and_install
self.run_setup(setup_script, setup_base, args)
File "/usr/lib/python2.7/dist-packages/setuptools/command/easy_install.py", line 1025, in run_setup
run_setup(setup_script, args)
File "/usr/lib/python2.7/dist-packages/setuptools/sandbox.py", line 50, in run_setup
lambda: execfile(
File "/usr/lib/python2.7/dist-packages/setuptools/sandbox.py", line 100, in run
return func()
File "/usr/lib/python2.7/dist-packages/setuptools/sandbox.py", line 52, in <lambda>
{'__file__':setup_script, '__name__':'__main__'}
File "setup.py", line 150, in <module>
File "setup.py", line 144, in get_ethtool_macro
File "/usr/lib/python2.7/distutils/ccompiler.py", line 566, in compile
depends, extra_postargs)
File "/usr/lib/python2.7/distutils/ccompiler.py", line 362, in _setup_compile
self.mkpath(os.path.dirname(obj))
File "/usr/lib/python2.7/distutils/ccompiler.py", line 881, in mkpath
mkpath(name, mode, dry_run=self.dry_run)
File "/usr/lib/python2.7/distutils/dir_util.py", line 68, in mkpath
log.info("creating %s", head)
File "/usr/lib/python2.7/distutils/log.py", line 40, in info
self._log(INFO, msg, args)
File "/usr/lib/python2.7/distutils/log.py", line 30, in _log
stream.write('%s\n' % msg)
TypeError: unicode argument expected, got 'str'
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/setuptools/sandbox.py", line 124, in wrap
return original(path,*args,**kw)
OSError: [Errno 2] No such file or directory: '/tmp/easy_install-IRRDCd/psutil-3.2.0/temp/tmpzcgy03.c'
Error in sys.exitfunc:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/setuptools/sandbox.py", line 124, in wrap
return original(path,*args,**kw)
OSError: [Errno 2] No such file or directory: '/tmp/easy_install-IRRDCd/psutil-3.2.0/temp/tmpzcgy03.c'
|
TypeError
|
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith("6") and not os.path.exists(file):
# IPv6 not supported
return
kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()
with open(file, "rt", **kw) as f:
f.readline() # skip the first line
for line in f:
try:
_, laddr, raddr, status, _, _, _, _, _, inode = line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
|
def process_inet(self, file, family, type_, inodes, filter_pid=None):
"""Parse /proc/net/tcp* and /proc/net/udp* files."""
if file.endswith("6") and not os.path.exists(file):
# IPv6 not supported
return
with open(file, "rt") as f:
f.readline() # skip the first line
for line in f:
try:
_, laddr, raddr, status, _, _, _, _, _, inode = line.split()[:10]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# # We assume inet sockets are unique, so we error
# # out if there are multiple references to the
# # same inode. We won't do this for UNIX sockets.
# if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
# raise ValueError("ambiguos inode with multiple "
# "PIDs references")
pid, fd = inodes[inode][0]
else:
pid, fd = None, -1
if filter_pid is not None and filter_pid != pid:
continue
else:
if type_ == socket.SOCK_STREAM:
status = TCP_STATUSES[status]
else:
status = _common.CONN_NONE
laddr = self.decode_address(laddr, family)
raddr = self.decode_address(raddr, family)
yield (fd, family, type_, laddr, raddr, status, pid)
|
https://github.com/giampaolo/psutil/issues/675
|
NVIM detected
Retreived terminal pid 13109, nvim should be one of its children
proc name rxvt-unicode with 5 children
child name & pid rxvt-unicode/13110
child name & pid zsh/13111
child name & pid nvim/13272
Could not find neovim socket 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
Traceback (most recent call last):
File "/home/teto/.i3/i3nvim.py", line 56, in get_nvim_socket
unix_sockets = child.connections(kind="unix")
File "/home/teto/.local/lib/python3.4/site-packages/psutil/__init__.py", line 1010, in connections
return self._proc.connections(kind)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 709, in wrapper
return fun(self, *args, **kwargs)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 1169, in connections
ret = _connections.retrieve(kind, self.pid)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 555, in retrieve
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 508, in process_unix
f.readline() # skip the first line
File "/usr/lib/python3.4/codecs.py", line 319, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
|
UnicodeDecodeError
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
kw = dict(encoding=DEFAULT_ENCODING) if PY3 else dict()
with open(file, "rt", **kw) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
with open(file, "rt") as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
https://github.com/giampaolo/psutil/issues/675
|
NVIM detected
Retreived terminal pid 13109, nvim should be one of its children
proc name rxvt-unicode with 5 children
child name & pid rxvt-unicode/13110
child name & pid zsh/13111
child name & pid nvim/13272
Could not find neovim socket 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
Traceback (most recent call last):
File "/home/teto/.i3/i3nvim.py", line 56, in get_nvim_socket
unix_sockets = child.connections(kind="unix")
File "/home/teto/.local/lib/python3.4/site-packages/psutil/__init__.py", line 1010, in connections
return self._proc.connections(kind)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 709, in wrapper
return fun(self, *args, **kwargs)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 1169, in connections
ret = _connections.retrieve(kind, self.pid)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 555, in retrieve
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 508, in process_unix
f.readline() # skip the first line
File "/usr/lib/python3.4/codecs.py", line 319, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
|
UnicodeDecodeError
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
# see: https://github.com/giampaolo/psutil/issues/675
kw = dict(encoding=FS_ENCODING, errors="replace") if PY3 else dict()
with open(file, "rt", **kw) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
def process_unix(self, file, family, inodes, filter_pid=None):
"""Parse /proc/net/unix files."""
with open_text(file) as f:
f.readline() # skip the first line
for line in f:
tokens = line.split()
try:
_, _, _, _, type_, _, inode = tokens[0:7]
except ValueError:
raise RuntimeError(
"error while parsing %s; malformed line %r" % (file, line)
)
if inode in inodes:
# With UNIX sockets we can have a single inode
# referencing many file descriptors.
pairs = inodes[inode]
else:
pairs = [(None, -1)]
for pid, fd in pairs:
if filter_pid is not None and filter_pid != pid:
continue
else:
if len(tokens) == 8:
path = tokens[-1]
else:
path = ""
type_ = int(type_)
raddr = None
status = _common.CONN_NONE
yield (fd, family, type_, path, raddr, status, pid)
|
https://github.com/giampaolo/psutil/issues/675
|
NVIM detected
Retreived terminal pid 13109, nvim should be one of its children
proc name rxvt-unicode with 5 children
child name & pid rxvt-unicode/13110
child name & pid zsh/13111
child name & pid nvim/13272
Could not find neovim socket 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
Traceback (most recent call last):
File "/home/teto/.i3/i3nvim.py", line 56, in get_nvim_socket
unix_sockets = child.connections(kind="unix")
File "/home/teto/.local/lib/python3.4/site-packages/psutil/__init__.py", line 1010, in connections
return self._proc.connections(kind)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 709, in wrapper
return fun(self, *args, **kwargs)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 1169, in connections
ret = _connections.retrieve(kind, self.pid)
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 555, in retrieve
for fd, family, type_, laddr, raddr, status, bound_pid in ls:
File "/home/teto/.local/lib/python3.4/site-packages/psutil/_pslinux.py", line 508, in process_unix
f.readline() # skip the first line
File "/usr/lib/python3.4/codecs.py", line 319, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xd7 in position 469: invalid continuation byte
|
UnicodeDecodeError
|
def poll(interval):
"""Calculate IO usage by comparing IO statics before and
after the interval.
Return a tuple including all currently running processes
sorted by IO activity and total disks I/O activity.
"""
# first get a list of all processes and disk io counters
procs = [p for p in psutil.process_iter()]
for p in procs[:]:
try:
p._before = p.io_counters()
except psutil.Error:
procs.remove(p)
continue
disks_before = psutil.disk_io_counters()
# sleep some time
time.sleep(interval)
# then retrieve the same info again
for p in procs[:]:
try:
p._after = p.io_counters()
p._cmdline = " ".join(p.cmdline())
if not p._cmdline:
p._cmdline = p.name()
p._username = p.username()
except (psutil.NoSuchProcess, psutil.ZombieProcess):
procs.remove(p)
disks_after = psutil.disk_io_counters()
# finally calculate results by comparing data before and
# after the interval
for p in procs:
p._read_per_sec = p._after.read_bytes - p._before.read_bytes
p._write_per_sec = p._after.write_bytes - p._before.write_bytes
p._total = p._read_per_sec + p._write_per_sec
disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
# sort processes by total disk IO so that the more intensive
# ones get listed first
processes = sorted(procs, key=lambda p: p._total, reverse=True)
return (processes, disks_read_per_sec, disks_write_per_sec)
|
def poll(interval):
"""Calculate IO usage by comparing IO statics before and
after the interval.
Return a tuple including all currently running processes
sorted by IO activity and total disks I/O activity.
"""
# first get a list of all processes and disk io counters
procs = [p for p in psutil.process_iter()]
for p in procs[:]:
try:
p._before = p.io_counters()
except psutil.Error:
procs.remove(p)
continue
disks_before = psutil.disk_io_counters()
# sleep some time
time.sleep(interval)
# then retrieve the same info again
for p in procs[:]:
try:
p._after = p.io_counters()
p._cmdline = " ".join(p.cmdline())
if not p._cmdline:
p._cmdline = p.name()
p._username = p.username()
except psutil.NoSuchProcess:
procs.remove(p)
disks_after = psutil.disk_io_counters()
# finally calculate results by comparing data before and
# after the interval
for p in procs:
p._read_per_sec = p._after.read_bytes - p._before.read_bytes
p._write_per_sec = p._after.write_bytes - p._before.write_bytes
p._total = p._read_per_sec + p._write_per_sec
disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
# sort processes by total disk IO so that the more intensive
# ones get listed first
processes = sorted(procs, key=lambda p: p._total, reverse=True)
return (processes, disks_read_per_sec, disks_write_per_sec)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def run(pid):
ACCESS_DENIED = ""
try:
p = psutil.Process(pid)
pinfo = p.as_dict(ad_value=ACCESS_DENIED)
except psutil.NoSuchProcess as err:
sys.exit(str(err))
try:
parent = p.parent()
if parent:
parent = "(%s)" % parent.name()
else:
parent = ""
except psutil.Error:
parent = ""
if pinfo["create_time"] != ACCESS_DENIED:
started = datetime.datetime.fromtimestamp(pinfo["create_time"]).strftime(
"%Y-%m-%d %H:%M"
)
else:
started = ACCESS_DENIED
io = pinfo.get("io_counters", ACCESS_DENIED)
if pinfo["memory_info"] != ACCESS_DENIED:
mem = "%s%% (resident=%s, virtual=%s) " % (
round(pinfo["memory_percent"], 1),
convert_bytes(pinfo["memory_info"].rss),
convert_bytes(pinfo["memory_info"].vms),
)
else:
mem = ACCESS_DENIED
children = p.children()
print_("pid", pinfo["pid"])
print_("name", pinfo["name"])
print_("exe", pinfo["exe"])
print_("parent", "%s %s" % (pinfo["ppid"], parent))
print_("cmdline", " ".join(pinfo["cmdline"]))
print_("started", started)
print_("user", pinfo["username"])
if POSIX and pinfo["uids"] and pinfo["gids"]:
print_("uids", "real=%s, effective=%s, saved=%s" % pinfo["uids"])
if POSIX and pinfo["gids"]:
print_("gids", "real=%s, effective=%s, saved=%s" % pinfo["gids"])
if POSIX:
print_("terminal", pinfo["terminal"] or "")
if hasattr(p, "getcwd"):
print_("cwd", pinfo["cwd"])
print_("memory", mem)
print_(
"cpu",
"%s%% (user=%s, system=%s)"
% (
pinfo["cpu_percent"],
getattr(pinfo["cpu_times"], "user", "?"),
getattr(pinfo["cpu_times"], "system", "?"),
),
)
print_("status", pinfo["status"])
print_("niceness", pinfo["nice"])
print_("num threads", pinfo["num_threads"])
if io != ACCESS_DENIED:
print_(
"I/O",
"bytes-read=%s, bytes-written=%s"
% (convert_bytes(io.read_bytes), convert_bytes(io.write_bytes)),
)
if children:
print_("children", "")
for child in children:
print_("", "pid=%s name=%s" % (child.pid, child.name()))
if pinfo["open_files"] != ACCESS_DENIED:
print_("open files", "")
for file in pinfo["open_files"]:
print_("", "fd=%s %s " % (file.fd, file.path))
if pinfo["threads"]:
print_("running threads", "")
for thread in pinfo["threads"]:
print_(
"",
"id=%s, user-time=%s, sys-time=%s"
% (thread.id, thread.user_time, thread.system_time),
)
if pinfo["connections"] not in (ACCESS_DENIED, []):
print_("open connections", "")
for conn in pinfo["connections"]:
if conn.type == socket.SOCK_STREAM:
type = "TCP"
elif conn.type == socket.SOCK_DGRAM:
type = "UDP"
else:
type = "UNIX"
lip, lport = conn.laddr
if not conn.raddr:
rip, rport = "*", "*"
else:
rip, rport = conn.raddr
print_(
"",
"%s:%s -> %s:%s type=%s status=%s"
% (lip, lport, rip, rport, type, conn.status),
)
|
def run(pid):
ACCESS_DENIED = ""
try:
p = psutil.Process(pid)
pinfo = p.as_dict(ad_value=ACCESS_DENIED)
except psutil.NoSuchProcess as err:
sys.exit(str(err))
try:
parent = p.parent()
if parent:
parent = "(%s)" % parent.name()
else:
parent = ""
except psutil.Error:
parent = ""
started = datetime.datetime.fromtimestamp(pinfo["create_time"]).strftime(
"%Y-%m-%d %H:%M"
)
io = pinfo.get("io_counters", ACCESS_DENIED)
mem = "%s%% (resident=%s, virtual=%s) " % (
round(pinfo["memory_percent"], 1),
convert_bytes(pinfo["memory_info"].rss),
convert_bytes(pinfo["memory_info"].vms),
)
children = p.children()
print_("pid", pinfo["pid"])
print_("name", pinfo["name"])
print_("exe", pinfo["exe"])
print_("parent", "%s %s" % (pinfo["ppid"], parent))
print_("cmdline", " ".join(pinfo["cmdline"]))
print_("started", started)
print_("user", pinfo["username"])
if POSIX and pinfo["uids"] and pinfo["gids"]:
print_("uids", "real=%s, effective=%s, saved=%s" % pinfo["uids"])
if POSIX and pinfo["gids"]:
print_("gids", "real=%s, effective=%s, saved=%s" % pinfo["gids"])
if POSIX:
print_("terminal", pinfo["terminal"] or "")
if hasattr(p, "getcwd"):
print_("cwd", pinfo["cwd"])
print_("memory", mem)
print_(
"cpu",
"%s%% (user=%s, system=%s)"
% (
pinfo["cpu_percent"],
getattr(pinfo["cpu_times"], "user", "?"),
getattr(pinfo["cpu_times"], "system", "?"),
),
)
print_("status", pinfo["status"])
print_("niceness", pinfo["nice"])
print_("num threads", pinfo["num_threads"])
if io != ACCESS_DENIED:
print_(
"I/O",
"bytes-read=%s, bytes-written=%s"
% (convert_bytes(io.read_bytes), convert_bytes(io.write_bytes)),
)
if children:
print_("children", "")
for child in children:
print_("", "pid=%s name=%s" % (child.pid, child.name()))
if pinfo["open_files"] != ACCESS_DENIED:
print_("open files", "")
for file in pinfo["open_files"]:
print_("", "fd=%s %s " % (file.fd, file.path))
if pinfo["threads"]:
print_("running threads", "")
for thread in pinfo["threads"]:
print_(
"",
"id=%s, user-time=%s, sys-time=%s"
% (thread.id, thread.user_time, thread.system_time),
)
if pinfo["connections"] not in (ACCESS_DENIED, []):
print_("open connections", "")
for conn in pinfo["connections"]:
if conn.type == socket.SOCK_STREAM:
type = "TCP"
elif conn.type == socket.SOCK_DGRAM:
type = "UDP"
else:
type = "UNIX"
lip, lport = conn.laddr
if not conn.raddr:
rip, rport = "*", "*"
else:
rip, rport = conn.raddr
print_(
"",
"%s:%s -> %s:%s type=%s status=%s"
% (lip, lport, rip, rport, type, conn.status),
)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def main():
# construct a dict where 'values' are all the processes
# having 'key' as their parent
tree = collections.defaultdict(list)
for p in psutil.process_iter():
try:
tree[p.ppid()].append(p.pid)
except (psutil.NoSuchProcess, psutil.ZombieProcess):
pass
# on systems supporting PID 0, PID 0's parent is usually 0
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
print_tree(min(tree), tree)
|
def main():
# construct a dict where 'values' are all the processes
# having 'key' as their parent
tree = collections.defaultdict(list)
for p in psutil.process_iter():
try:
tree[p.ppid()].append(p.pid)
except psutil.NoSuchProcess:
pass
# on systems supporting PID 0, PID 0's parent is usually 0
if 0 in tree and 0 in tree[0]:
tree[0].remove(0)
print_tree(min(tree), tree)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError("pid must be an integer (got %r)" % pid)
if pid < 0:
raise ValueError("pid must be a positive integer (got %s)" % pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# we should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user
pass
except ZombieProcess:
# Let's consider a zombie process as legitimate as
# tehcnically it's still alive (it can be queried,
# although not always, and it's returned by pids()).
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = "no process found with pid %s" % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
# This pair is supposed to indentify a Process instance
# univocally over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
|
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError("pid must be an integer (got %r)" % pid)
if pid < 0:
raise ValueError("pid must be a positive integer (got %s)" % pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# we should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = "no process found with pid %s" % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
# This pair is supposed to indentify a Process instance
# univocally over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def __str__(self):
try:
pid = self.pid
name = repr(self.name())
except ZombieProcess:
details = "(pid=%s (zombie))" % self.pid
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__, self.__class__.__name__, details)
|
def __str__(self):
try:
pid = self.pid
name = repr(self.name())
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__, self.__class__.__name__, details)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def as_dict(self, attrs=None, ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If 'attrs' is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
'ad_value' is the value which gets assigned in case
AccessDenied or ZombieProcess exception is raised when
retrieving that particular process information.
"""
excluded_names = set(
[
"send_signal",
"suspend",
"resume",
"terminate",
"kill",
"wait",
"is_running",
"as_dict",
"parent",
"children",
"rlimit",
]
)
retdict = dict()
ls = set(attrs or [x for x in dir(self) if not x.startswith("get")])
for name in ls:
if name.startswith("_"):
continue
if name.startswith("set_"):
continue
if name.startswith("get_"):
msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = name[4:]
if name in ls:
continue
if name == "getcwd":
msg = "getcwd() is deprecated; use cwd() instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = "cwd"
if name in ls:
continue
if name in excluded_names:
continue
try:
attr = getattr(self, name)
if callable(attr):
ret = attr()
else:
ret = attr
except (AccessDenied, ZombieProcess):
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
|
def as_dict(self, attrs=None, ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If 'attrs' is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
'ad_value' is the value which gets assigned in case
AccessDenied exception is raised when retrieving that
particular process information.
"""
excluded_names = set(
[
"send_signal",
"suspend",
"resume",
"terminate",
"kill",
"wait",
"is_running",
"as_dict",
"parent",
"children",
"rlimit",
]
)
retdict = dict()
ls = set(attrs or [x for x in dir(self) if not x.startswith("get")])
for name in ls:
if name.startswith("_"):
continue
if name.startswith("set_"):
continue
if name.startswith("get_"):
msg = "%s() is deprecated; use %s() instead" % (name, name[4:])
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = name[4:]
if name in ls:
continue
if name == "getcwd":
msg = "getcwd() is deprecated; use cwd() instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
name = "cwd"
if name in ls:
continue
if name in excluded_names:
continue
try:
attr = getattr(self, name)
if callable(attr):
ret = attr()
else:
ret = attr
except AccessDenied:
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def status(self):
"""The process current status as a STATUS_* constant."""
try:
return self._proc.status()
except ZombieProcess:
return STATUS_ZOMBIE
|
def status(self):
"""The process current status as a STATUS_* constant."""
return self._proc.status()
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
If recursive is True return all the parent descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
if hasattr(_psplatform, "ppid_map"):
# Windows only: obtain a {pid:ppid, ...} dict for all running
# processes in one shot (faster).
ppid_map = _psplatform.ppid_map()
else:
ppid_map = None
ret = []
if not recursive:
if ppid_map is None:
# 'slow' version, common to all platforms except Windows
for p in process_iter():
try:
if p.ppid() == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= p.create_time():
ret.append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
# Windows only (faster)
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except (NoSuchProcess, ZombieProcess):
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = collections.defaultdict(list)
if ppid_map is None:
for p in process_iter():
try:
table[p.ppid()].append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
for pid, ppid in ppid_map.items():
try:
p = Process(pid)
table[ppid].append(p)
except (NoSuchProcess, ZombieProcess):
pass
# At this point we have a mapping table where table[self.pid]
# are the current process' children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
except (NoSuchProcess, ZombieProcess):
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret
|
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
If recursive is True return all the parent descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
if hasattr(_psplatform, "ppid_map"):
# Windows only: obtain a {pid:ppid, ...} dict for all running
# processes in one shot (faster).
ppid_map = _psplatform.ppid_map()
else:
ppid_map = None
ret = []
if not recursive:
if ppid_map is None:
# 'slow' version, common to all platforms except Windows
for p in process_iter():
try:
if p.ppid() == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= p.create_time():
ret.append(p)
except NoSuchProcess:
pass
else:
# Windows only (faster)
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except NoSuchProcess:
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = collections.defaultdict(list)
if ppid_map is None:
for p in process_iter():
try:
table[p.ppid()].append(p)
except NoSuchProcess:
pass
else:
for pid, ppid in ppid_map.items():
try:
p = Process(pid)
table[ppid].append(p)
except NoSuchProcess:
pass
# At this point we have a mapping table where table[self.pid]
# are the current process' children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
except NoSuchProcess:
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def __init__(self, pid, name=None, ppid=None, msg=None):
Error.__init__(self)
self.pid = pid
self.ppid = ppid
self.name = name
self.msg = msg
if msg is None:
if name and ppid:
details = "(pid=%s, name=%s, ppid=%s)" % (
self.pid,
repr(self.name),
self.ppid,
)
elif name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process still exists but it's a zombie " + details
|
def __init__(self, pid, name=None, msg=None):
Error.__init__(self)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process still exists but it's a zombie " + details
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if _POSIX:
ppid = self._proc.ppid()
else:
if self._ppid is None:
ppid = self._proc.ppid()
self._ppid = ppid
self._proc._ppid = ppid
return ppid
|
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if _POSIX:
return self._proc.ppid()
else:
if self._ppid is None:
self._ppid = self._proc.ppid()
return self._ppid
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
def wrap_exceptions(fun):
"""Decorator which translates bare OSError exceptions into
NoSuchProcess and AccessDenied.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
if err.errno == errno.ESRCH:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
|
def __init__(self, pid):
self.pid = pid
self._name = None
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrap_exceptions_w_zombie(fun):
"""Same as above but also handles zombies."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
return wrapper
|
def wrap_exceptions_w_zombie(fun):
"""Same as above but also handles zombies."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name)
return wrapper
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
|
def wrapper(self, *args, **kwargs):
try:
return wrap_exceptions(fun)(self)
except NoSuchProcess:
if not pid_exists(self.pid):
raise
else:
raise ZombieProcess(self.pid, self._name)
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError) as err:
if err.errno in (errno.ENOENT, errno.ESRCH):
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s" % self.pid):
return ""
else:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
# readlink() might return paths containing null bytes ('\x00').
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
exe = exe.split("\x00")[0]
if exe.endswith(" (deleted)") and not os.path.exists(exe):
exe = exe[:-10]
return exe
|
def exe(self):
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError) as err:
if err.errno in (errno.ENOENT, errno.ESRCH):
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s" % self.pid):
return ""
else:
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
# readlink() might return paths containing null bytes ('\x00').
# Certain names have ' (deleted)' appended. Usually this is
# bogus as the file actually exists. Either way that's not
# important as we don't want to discriminate executables which
# have been deleted.
exe = exe.split("\x00")[0]
if exe.endswith(" (deleted)") and not os.path.exists(exe):
exe = exe[:-10]
return exe
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def rlimit(self, resource, limits=None):
# if pid is 0 prlimit() applies to the calling process and
# we don't want that
if self.pid == 0:
raise ValueError("can't use prlimit() against PID 0 process")
try:
if limits is None:
# get
return cext.linux_prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError("second argument must be a (soft, hard) tuple")
soft, hard = limits
cext.linux_prlimit(self.pid, resource, soft, hard)
except OSError as err:
if err.errno == errno.ENOSYS and pid_exists(self.pid):
# I saw this happening on Travis:
# https://travis-ci.org/giampaolo/psutil/jobs/51368273
raise ZombieProcess(self.pid, self._name, self._ppid)
else:
raise
|
def rlimit(self, resource, limits=None):
# if pid is 0 prlimit() applies to the calling process and
# we don't want that
if self.pid == 0:
raise ValueError("can't use prlimit() against PID 0 process")
try:
if limits is None:
# get
return cext.linux_prlimit(self.pid, resource)
else:
# set
if len(limits) != 2:
raise ValueError("second argument must be a (soft, hard) tuple")
soft, hard = limits
cext.linux_prlimit(self.pid, resource, soft, hard)
except OSError as err:
if err.errno == errno.ENOSYS and pid_exists(self.pid):
# I saw this happening on Travis:
# https://travis-ci.org/giampaolo/psutil/jobs/51368273
raise ZombieProcess(self.pid, self._name)
else:
raise
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
def wrap_exceptions(fun):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
return wrapper
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name, self._ppid)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except EnvironmentError as err:
# support for private module import
if NoSuchProcess is None or AccessDenied is None or ZombieProcess is None:
raise
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
if err.errno in (errno.ENOENT, errno.ESRCH):
if not pid_exists(self.pid):
raise NoSuchProcess(self.pid, self._name)
else:
raise ZombieProcess(self.pid, self._name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._name)
raise
|
https://github.com/giampaolo/psutil/issues/428
|
How to reproduce:
1. start Photoshop CS6 on a Mountain Lion OSX
2. import psutil; [x.as_dict() for x in psutil.process_iter()] # (in .py file, ipython)
What is the expected output?
A long list of processes and related information
What do you see instead?
$ python test.py
Traceback (most recent call last):
File "test.py", line 3, in <module>
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 225, in as_dict
ret = attr()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.py",
line 414, in get_nice
return self._platform_impl.get_process_nice()
File
"/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.py",
line 151, in wrapper
raise NoSuchProcess(self.pid, self._process_name)
psutil._error.NoSuchProcess: process no longer exists (pid=46244)
or within iPython notebook:
[x.as_dict() for x in psutil.process_iter() if x.is_running()]
---------------------------------------------------------------------------
NoSuchProcess Traceback (most recent call last)
<ipython-input-108-a71c6dffe397> in <module>()
----> 1 [x.as_dict() for x in psutil.process_iter() if x.is_running()]
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in as_dict(self, attrs, ad_value)
223 ret = attr(interval=0)
224 else:
--> 225 ret = attr()
226 else:
227 ret = attr
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/__init__.pyc
in get_nice(self)
412 def get_nice(self):
413 """Get process niceness (priority)."""
--> 414 return self._platform_impl.get_process_nice()
415
416 @_assert_pid_not_reused
/Users/rico/.virtualenvs/temp/lib/python2.7/site-packages/psutil/_psosx.pyc in
wrapper(self, *args, **kwargs)
149 err = sys.exc_info()[1]
150 if err.errno == errno.ESRCH:
--> 151 raise NoSuchProcess(self.pid, self._process_name)
152 if err.errno in (errno.EPERM, errno.EACCES):
153 raise AccessDenied(self.pid, self._process_name)
NoSuchProcess: process no longer exists (pid=46243, name='adobe_licutil')
When I close Photoshop, the error will not show up. When starting it again the
error reappears.
An additional is_running() check within the list comprehension does not change
a thing and running the code several times will not change the reported pid.
|
psutil._error.NoSuchProcess
|
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified 'cpus' must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
(Windows, Linux and BSD only).
"""
if cpus is None:
return self._proc.cpu_affinity_get()
else:
self._proc.cpu_affinity_set(cpus)
|
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified 'cpus' must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
"""
if cpus is None:
return self._proc.cpu_affinity_get()
else:
self._proc.cpu_affinity_set(cpus)
|
https://github.com/giampaolo/psutil/issues/569
|
======================================================================
FAIL: test_cpu_count_physical (__main__.TestModuleFunctionsLeaks)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/test_memory_leaks.py", line 340, in test_cpu_count_physical
self.execute('cpu_count')
File "test/test_memory_leaks.py", line 86, in execute
% (rss2, rss3, difference))
AssertionError: rss2=15773696, rss3=19410944, difference=3637248
|
AssertionError
|
def preprocess_and_wrap(
broadcast=None, wrap_like=None, match_unit=False, to_magnitude=False
):
"""Return decorator to wrap array calculations for type flexibility.
Assuming you have a calculation that works internally with `pint.Quantity` or
`numpy.ndarray`, this will wrap the function to be able to handle `xarray.DataArray` and
`pint.Quantity` as well (assuming appropriate match to one of the input arguments).
Parameters
----------
broadcast : iterable of str or None
Iterable of string labels for arguments to broadcast against each other using xarray,
assuming they are supplied as `xarray.DataArray`. No automatic broadcasting will occur
with default of None.
wrap_like : str or array-like or tuple of str or tuple of array-like or None
Wrap the calculation output following a particular input argument (if str) or data
object (if array-like). If tuple, will assume output is in the form of a tuple,
and wrap iteratively according to the str or array-like contained within. If None,
will not wrap output.
match_unit : bool
If true, force the unit of the final output to be that of wrapping object (as
determined by wrap_like), no matter the original calculation output. Defaults to
False.
to_magnitude : bool
If true, downcast xarray and Pint arguments to their magnitude. If false, downcast
xarray arguments to Quantity, and do not change other array-like arguments.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(
bound_args.arguments[arg_name], (xr.DataArray, xr.Variable)
)
)
broadcasted_args = xr.broadcast(
*(
bound_args.arguments[arg_name]
for arg_name in arg_names_to_broadcast
)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
def cast_variables(arg, arg_name):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
return arg.data
_mutate_arguments(bound_args, xr.Variable, cast_variables)
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
_mutate_arguments(
bound_args, xr.DataArray, lambda arg, _: arg.metpy.unit_array
)
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
_mutate_arguments(bound_args, units.Quantity, lambda arg, _: arg.m)
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
return wrapper
return decorator
|
def preprocess_and_wrap(
broadcast=None, wrap_like=None, match_unit=False, to_magnitude=False
):
"""Return decorator to wrap array calculations for type flexibility.
Assuming you have a calculation that works internally with `pint.Quantity` or
`numpy.ndarray`, this will wrap the function to be able to handle `xarray.DataArray` and
`pint.Quantity` as well (assuming appropriate match to one of the input arguments).
Parameters
----------
broadcast : iterable of str or None
Iterable of string labels for arguments to broadcast against each other using xarray,
assuming they are supplied as `xarray.DataArray`. No automatic broadcasting will occur
with default of None.
wrap_like : str or array-like or tuple of str or tuple of array-like or None
Wrap the calculation output following a particular input argument (if str) or data
object (if array-like). If tuple, will assume output is in the form of a tuple,
and wrap iteratively according to the str or array-like contained within. If None,
will not wrap output.
match_unit : bool
If true, force the unit of the final output to be that of wrapping object (as
determined by wrap_like), no matter the original calculation output. Defaults to
False.
to_magnitude : bool
If true, downcast xarray and Pint arguments to their magnitude. If false, downcast
xarray arguments to Quantity, and do not change other array-like arguments.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(
bound_args.arguments[arg_name], (xr.DataArray, xr.Variable)
)
)
broadcasted_args = xr.broadcast(
*(
bound_args.arguments[arg_name]
for arg_name in arg_names_to_broadcast
)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.Variable):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
bound_args.arguments[arg_name] = bound_args.arguments[arg_name].data
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.DataArray):
bound_args.arguments[arg_name] = bound_args.arguments[
arg_name
].metpy.unit_array
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], units.Quantity):
bound_args.arguments[arg_name] = bound_args.arguments[
arg_name
].m
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
return wrapper
return decorator
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(
bound_args.arguments[arg_name], (xr.DataArray, xr.Variable)
)
)
broadcasted_args = xr.broadcast(
*(bound_args.arguments[arg_name] for arg_name in arg_names_to_broadcast)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
def cast_variables(arg, arg_name):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
return arg.data
_mutate_arguments(bound_args, xr.Variable, cast_variables)
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
_mutate_arguments(bound_args, xr.DataArray, lambda arg, _: arg.metpy.unit_array)
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
_mutate_arguments(bound_args, units.Quantity, lambda arg, _: arg.m)
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
return wrapper
|
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(
bound_args.arguments[arg_name], (xr.DataArray, xr.Variable)
)
)
broadcasted_args = xr.broadcast(
*(bound_args.arguments[arg_name] for arg_name in arg_names_to_broadcast)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.Variable):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
bound_args.arguments[arg_name] = bound_args.arguments[arg_name].data
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.DataArray):
bound_args.arguments[arg_name] = bound_args.arguments[
arg_name
].metpy.unit_array
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], units.Quantity):
bound_args.arguments[arg_name] = bound_args.arguments[arg_name].m
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
return wrapper
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(bound_args.arguments[arg_name], (xr.DataArray, xr.Variable))
)
broadcasted_args = xr.broadcast(
*(bound_args.arguments[arg_name] for arg_name in arg_names_to_broadcast)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
def cast_variables(arg, arg_name):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
return arg.data
_mutate_arguments(bound_args, xr.Variable, cast_variables)
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
_mutate_arguments(bound_args, xr.DataArray, lambda arg, _: arg.metpy.unit_array)
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
_mutate_arguments(bound_args, units.Quantity, lambda arg, _: arg.m)
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
# Auto-broadcast select xarray arguments, and update bound_args
if broadcast is not None:
arg_names_to_broadcast = tuple(
arg_name
for arg_name in broadcast
if arg_name in bound_args.arguments
and isinstance(bound_args.arguments[arg_name], (xr.DataArray, xr.Variable))
)
broadcasted_args = xr.broadcast(
*(bound_args.arguments[arg_name] for arg_name in arg_names_to_broadcast)
)
for i, arg_name in enumerate(arg_names_to_broadcast):
bound_args.arguments[arg_name] = broadcasted_args[i]
# Cast all Variables to their data and warn
# (need to do before match finding, since we don't want to rewrap as Variable)
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.Variable):
warnings.warn(
f"Argument {arg_name} given as xarray Variable...casting to its data. "
"xarray DataArrays are recommended instead."
)
bound_args.arguments[arg_name] = bound_args.arguments[arg_name].data
# Obtain proper match if referencing an input
match = list(wrap_like) if isinstance(wrap_like, tuple) else wrap_like
if isinstance(wrap_like, str):
match = bound_args.arguments[wrap_like]
elif isinstance(wrap_like, tuple):
for i, arg in enumerate(wrap_like):
if isinstance(arg, str):
match[i] = bound_args.arguments[arg]
# Cast all DataArrays to Pint Quantities
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], xr.DataArray):
bound_args.arguments[arg_name] = bound_args.arguments[
arg_name
].metpy.unit_array
# Optionally cast all Quantities to their magnitudes
if to_magnitude:
for arg_name in bound_args.arguments:
if isinstance(bound_args.arguments[arg_name], units.Quantity):
bound_args.arguments[arg_name] = bound_args.arguments[arg_name].m
# Evaluate inner calculation
result = func(*bound_args.args, **bound_args.kwargs)
# Wrap output based on match and match_unit
if match is None:
return result
else:
if match_unit:
wrapping = _wrap_output_like_matching_units
else:
wrapping = _wrap_output_like_not_matching_units
if isinstance(match, list):
return tuple(wrapping(*args) for args in zip(result, match))
else:
return wrapping(result, match)
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def add_grid_arguments_from_xarray(func):
"""Fill in optional arguments like dx/dy from DataArray arguments."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
grid_prototype = None
for da in dataarray_arguments(bound_args):
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number(
"x"
)
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number(
"y"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if (
"latitude" in bound_args.arguments
and bound_args.arguments["latitude"] is None
):
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
def add_grid_arguments_from_xarray(func):
"""Fill in optional arguments like dx/dy from DataArray arguments."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number(
"x"
)
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number(
"y"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if (
"latitude" in bound_args.arguments
and bound_args.arguments["latitude"] is None
):
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
grid_prototype = None
for da in dataarray_arguments(bound_args):
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number("x")
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number("y")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if "latitude" in bound_args.arguments and bound_args.arguments["latitude"] is None:
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number("x")
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number("y")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if "latitude" in bound_args.arguments and bound_args.arguments["latitude"] is None:
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def add_vertical_dim_from_xarray(func):
"""Fill in optional vertical_dim from DataArray argument."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Fill in vertical_dim
if "vertical_dim" in bound_args.arguments:
a = next(dataarray_arguments(bound_args), None)
if a is not None:
try:
bound_args.arguments["vertical_dim"] = a.metpy.find_axis_number(
"vertical"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to initial dimension."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
def add_vertical_dim_from_xarray(func):
"""Fill in optional vertical_dim from DataArray argument."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray in arguments
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
# Fill in vertical_dim
if len(dataarray_arguments) > 0 and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = dataarray_arguments[
0
].metpy.find_axis_number("vertical")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to initial dimension."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Fill in vertical_dim
if "vertical_dim" in bound_args.arguments:
a = next(dataarray_arguments(bound_args), None)
if a is not None:
try:
bound_args.arguments["vertical_dim"] = a.metpy.find_axis_number(
"vertical"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to initial dimension."
)
return func(*bound_args.args, **bound_args.kwargs)
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray in arguments
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
# Fill in vertical_dim
if len(dataarray_arguments) > 0 and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = dataarray_arguments[
0
].metpy.find_axis_number("vertical")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to initial dimension."
)
return func(*bound_args.args, **bound_args.kwargs)
|
https://github.com/Unidata/MetPy/issues/1603
|
Traceback (most recent call last):
File "moist_4panel.py", line 168, in <module>
pw[x,y] = mpcalc.precipitable_water(ds['P'].isel(latitude=x,longitude=y),ds['TD'].isel(latitude=x,longitude=y),bottom=pbot,top=ptop).m
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1174, in wrapper
return wrapping(result, match)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/metpy/xarray.py", line 1214, in _wrap_output_like_not_matching_units
xr.DataArray(result, coords=match.coords, dims=match.dims) if output_xarray
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 344, in __init__
coords, dims = _infer_coords_and_dims(data.shape, coords, dims)
File "/d1/anaconda3/envs/era5/lib/python3.8/site-packages/xarray/core/dataarray.py", line 121, in _infer_coords_and_dims
raise ValueError(
ValueError: different number of dimensions on data and dims: 0 vs 1
|
ValueError
|
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
Only reliably functions on 1D profiles (not higher-dimension vertical cross sections or
grids).
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = (
(mpconsts.Rd * t + mpconsts.Lv * rs)
/ (
mpconsts.Cp_d
+ (
mpconsts.Lv
* mpconsts.Lv
* rs
* mpconsts.epsilon
/ (mpconsts.Rd * t * t)
)
)
).to("kelvin")
return (frac / p).magnitude
pressure = np.atleast_1d(pressure)
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to("mbar")
reference_pressure = reference_pressure.to("mbar")
temperature = np.atleast_1d(temperature)
side = "left"
pres_decreasing = pressure[0] > pressure[-1]
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = "right"
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if _greater_or_close(reference_pressure, pressure.min()):
# Integrate downward in pressure
pres_down = np.append(
reference_pressure.m, pressure[(ref_pres_idx - 1) :: -1].m
)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
|
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
Only reliably functions on 1D profiles (not higher-dimension vertical cross sections or
grids).
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = (
(mpconsts.Rd * t + mpconsts.Lv * rs)
/ (
mpconsts.Cp_d
+ (
mpconsts.Lv
* mpconsts.Lv
* rs
* mpconsts.epsilon
/ (mpconsts.Rd * t * t)
)
)
).to("kelvin")
return (frac / p).magnitude
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to("mbar")
reference_pressure = reference_pressure.to("mbar")
temperature = np.atleast_1d(temperature)
side = "left"
pres_decreasing = pressure[0] > pressure[-1]
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = "right"
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if reference_pressure > pressure.min():
# Integrate downward in pressure
pres_down = np.append(
reference_pressure.m, pressure[(ref_pres_idx - 1) :: -1].m
)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
|
https://github.com/Unidata/MetPy/issues/1332
|
Traceback (most recent call last):
File ".\MetPy_Sounding.py", line 42, in <module>
Tw = mpcalc.wet_bulb_temperature(p, T, Td).to('degC')
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\xarray.py", line 655, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\units.py", line 320, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\calc\thermo.py", line 2413, in wet_bulb_temperature
ret[...] = moist_adiabat_temperatures[-1].magnitude
File "C:\ProgramData\Anaconda3\lib\site-packages\pint\quantity.py", line 1719, in __getitem__
return type(self)(self._magnitude[key], self._units)
IndexError: index -1 is out of bounds for axis 0 with size 0
|
IndexError
|
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method. The LCL is
computed, and that parcel brought down to the starting pressure along a moist adiabat.
The Normand method (and others) are described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
Notes
-----
Since this function iteratively applies a parcel calculation, it should be used with
caution on large arrays.
"""
if not hasattr(pressure, "shape"):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
lcl_press, lcl_temp = lcl(pressure, temperature, dewpoint)
it = np.nditer(
[pressure.magnitude, lcl_press.magnitude, lcl_temp.magnitude, None],
op_dtypes=["float", "float", "float", "float"],
flags=["buffered"],
)
for press, lpress, ltemp, ret in it:
press = press * pressure.units
lpress = lpress * lcl_press.units
ltemp = ltemp * lcl_temp.units
moist_adiabat_temperatures = moist_lapse(press, ltemp, lpress)
ret[...] = moist_adiabat_temperatures.magnitude
# If we started with a scalar, return a scalar
ret = it.operands[3]
if ret.size == 1:
ret = ret[0]
return ret * moist_adiabat_temperatures.units
|
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method. The LCL is
computed, and that parcel brought down to the starting pressure along a moist adiabat.
The Normand method (and others) are described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
Notes
-----
Since this function iteratively applies a parcel calculation, it should be used with
caution on large arrays.
"""
if not hasattr(pressure, "shape"):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
it = np.nditer(
[pressure, temperature, dewpoint, None],
op_dtypes=["float", "float", "float", "float"],
flags=["buffered"],
)
for press, temp, dewp, ret in it:
press = press * pressure.units
temp = temp * temperature.units
dewp = dewp * dewpoint.units
lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
moist_adiabat_temperatures = moist_lapse(
concatenate([lcl_pressure, press]), lcl_temperature
)
ret[...] = moist_adiabat_temperatures[-1].magnitude
# If we started with a scalar, return a scalar
if it.operands[3].size == 1:
return it.operands[3][0] * moist_adiabat_temperatures.units
return it.operands[3] * moist_adiabat_temperatures.units
|
https://github.com/Unidata/MetPy/issues/1332
|
Traceback (most recent call last):
File ".\MetPy_Sounding.py", line 42, in <module>
Tw = mpcalc.wet_bulb_temperature(p, T, Td).to('degC')
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\xarray.py", line 655, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\units.py", line 320, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\metpy\calc\thermo.py", line 2413, in wet_bulb_temperature
ret[...] = moist_adiabat_temperatures[-1].magnitude
File "C:\ProgramData\Anaconda3\lib\site-packages\pint\quantity.py", line 1719, in __getitem__
return type(self)(self._magnitude[key], self._units)
IndexError: index -1 is out of bounds for axis 0 with size 0
|
IndexError
|
def _build(self):
"""Build the plot by calling needed plotting methods as necessary."""
lon, lat, data = self.plotdata
# Use the cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a radius
if self.parent._proj_obj == ccrs.PlateCarree():
scale = 1.0
else:
scale = 100000.0
point_locs = self.parent._proj_obj.transform_points(ccrs.PlateCarree(), lon, lat)
subset = reduce_point_density(point_locs, self.reduce_points * scale)
self.handle = StationPlot(
self.parent.ax,
lon[subset],
lat[subset],
clip_on=True,
transform=ccrs.PlateCarree(),
fontsize=10,
)
for i, ob_type in enumerate(self.fields):
field_kwargs = {}
if len(self.locations) > 1:
location = self.locations[i]
else:
location = self.locations[0]
if len(self.colors) > 1:
field_kwargs["color"] = self.colors[i]
else:
field_kwargs["color"] = self.colors[0]
if len(self.formats) > 1:
field_kwargs["formatter"] = self.formats[i]
else:
field_kwargs["formatter"] = self.formats[0]
if len(self.plot_units) > 1:
field_kwargs["plot_units"] = self.plot_units[i]
else:
field_kwargs["plot_units"] = self.plot_units[0]
if hasattr(self.data, "units") and (field_kwargs["plot_units"] is not None):
parameter = data[ob_type][subset].values * units(self.data.units[ob_type])
else:
parameter = data[ob_type][subset]
if field_kwargs["formatter"] is not None:
mapper = getattr(wx_symbols, str(field_kwargs["formatter"]), None)
if mapper is not None:
field_kwargs.pop("formatter")
self.handle.plot_symbol(location, parameter, mapper, **field_kwargs)
else:
if self.formats[i] == "text":
self.handle.plot_text(
location, parameter, color=field_kwargs["color"]
)
else:
self.handle.plot_parameter(location, parameter, **field_kwargs)
else:
field_kwargs.pop("formatter")
self.handle.plot_parameter(location, parameter, **field_kwargs)
if self.vector_field[0] is not None:
vector_kwargs = {}
vector_kwargs["color"] = self.vector_field_color
vector_kwargs["plot_units"] = self.vector_plot_units
if hasattr(self.data, "units") and (vector_kwargs["plot_units"] is not None):
u = data[self.vector_field[0]][subset].values * units(
self.data.units[self.vector_field[0]]
)
v = data[self.vector_field[1]][subset].values * units(
self.data.units[self.vector_field[1]]
)
else:
vector_kwargs.pop("plot_units")
u = data[self.vector_field[0]][subset]
v = data[self.vector_field[1]][subset]
if self.vector_field_length is not None:
vector_kwargs["length"] = self.vector_field_length
self.handle.plot_barb(u, v, **vector_kwargs)
|
def _build(self):
"""Build the plot by calling needed plotting methods as necessary."""
lon, lat, data = self.plotdata
# Use the cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a radius
if self.parent._proj_obj == ccrs.PlateCarree():
scale = 1.0
else:
scale = 100000.0
point_locs = self.parent._proj_obj.transform_points(ccrs.PlateCarree(), lon, lat)
subset = reduce_point_density(point_locs, self.reduce_points * scale)
self.handle = StationPlot(
self.parent.ax,
lon[subset],
lat[subset],
clip_on=True,
transform=ccrs.PlateCarree(),
fontsize=10,
)
for i, ob_type in enumerate(self.fields):
field_kwargs = {}
if len(self.locations) > 1:
location = self.locations[i]
else:
location = self.locations[0]
if len(self.colors) > 1:
field_kwargs["color"] = self.colors[i]
else:
field_kwargs["color"] = self.colors[0]
if len(self.formats) > 1:
field_kwargs["formatter"] = self.formats[i]
else:
field_kwargs["formatter"] = self.formats[0]
if len(self.plot_units) > 1:
field_kwargs["plot_units"] = self.plot_units[i]
else:
field_kwargs["plot_units"] = self.plot_units[0]
if hasattr(self.data, "units") and (field_kwargs["plot_units"] is not None):
parameter = data[ob_type][subset].values * units(self.data.units[ob_type])
else:
parameter = data[ob_type][subset]
if field_kwargs["formatter"] is not None:
mapper = getattr(wx_symbols, str(field_kwargs["formatter"]), None)
if mapper is not None:
field_kwargs.pop("formatter")
self.handle.plot_symbol(location, parameter, mapper, **field_kwargs)
else:
if self.formats[i] == "text":
self.handle.plot_text(
location, data[ob_type][subset], color=field_kwargs["color"]
)
else:
self.handle.plot_parameter(
location, data[ob_type][subset], **field_kwargs
)
else:
field_kwargs.pop("formatter")
self.handle.plot_parameter(location, parameter, **field_kwargs)
if self.vector_field[0] is not None:
vector_kwargs = {}
vector_kwargs["color"] = self.vector_field_color
vector_kwargs["plot_units"] = self.vector_plot_units
if hasattr(self.data, "units") and (vector_kwargs["plot_units"] is not None):
u = data[self.vector_field[0]][subset].values * units(
self.data.units[self.vector_field[0]]
)
v = data[self.vector_field[1]][subset].values * units(
self.data.units[self.vector_field[1]]
)
else:
vector_kwargs.pop("plot_units")
u = data[self.vector_field[0]][subset]
v = data[self.vector_field[1]][subset]
if self.vector_field_length is not None:
vector_kwargs["length"] = self.vector_field_length
self.handle.plot_barb(u, v, **vector_kwargs)
|
https://github.com/Unidata/MetPy/issues/1574
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-df2b7c9f2f48> in <module>
25 pc.panels = [panel]
26
---> 27 pc.show()
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in show(self)
566 def show(self):
567 """Show the constructed graphic on the screen."""
--> 568 self.draw()
569 plt.show()
570
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
553 for panel in self.panels:
554 with panel.hold_trait_notifications():
--> 555 panel.draw()
556
557 def save(self, *args, **kwargs):
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
744 for p in self.plots:
745 with p.hold_trait_notifications():
--> 746 p.draw()
747
748 # Add all of the maps
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
1532 if self._need_redraw:
1533 if getattr(self, 'handle', None) is None:
-> 1534 self._build()
1535 self._need_redraw = False
1536
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in _build(self)
1592 else:
1593 self.handle.plot_parameter(location, data[ob_type][subset],
-> 1594 **field_kwargs)
1595 else:
1596 field_kwargs.pop('formatter')
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/station_plot.py in plot_parameter(self, location, parameter, formatter, **kwargs)
199 # If plot_units specified, convert the data to those units
200 plotting_units = kwargs.pop('plot_units', None)
--> 201 parameter = self._scalar_plotting_units(parameter, plotting_units)
202 if hasattr(parameter, 'units'):
203 parameter = parameter.magnitude
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/station_plot.py in _scalar_plotting_units(scalar_value, plotting_units)
349 scalar_value = scalar_value.to(plotting_units)
350 else:
--> 351 raise ValueError('To convert to plotting units, units must be attached to '
352 'scalar value being converted.')
353 return scalar_value
ValueError: To convert to plotting units, units must be attached to scalar value being converted.
/Users/kgoebber/miniconda3/envs/new_main/lib/python3.7/site-packages/cartopy/mpl/geoaxes.py:388: MatplotlibDeprecationWarning:
The 'inframe' parameter of draw() was deprecated in Matplotlib 3.3 and will be removed two minor releases later. Use Axes.redraw_in_frame() instead. If any parameter follows 'inframe', they should be passed as keyword, not positionally.
inframe=inframe)
|
ValueError
|
def _scalar_plotting_units(scalar_value, plotting_units):
"""Handle conversion to plotting units for non-vector quantities."""
if plotting_units:
if hasattr(scalar_value, "units"):
scalar_value = scalar_value.to(plotting_units)
else:
raise ValueError(
"To convert to plotting units, units must be attached to "
"scalar value being converted."
)
return scalar_value
|
def _scalar_plotting_units(scalar_value, plotting_units):
"""Handle conversion to plotting units for barbs and arrows."""
if plotting_units:
if hasattr(scalar_value, "units"):
scalar_value = scalar_value.to(plotting_units)
else:
raise ValueError(
"To convert to plotting units, units must be attached to "
"scalar value being converted."
)
return scalar_value
|
https://github.com/Unidata/MetPy/issues/1574
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-df2b7c9f2f48> in <module>
25 pc.panels = [panel]
26
---> 27 pc.show()
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in show(self)
566 def show(self):
567 """Show the constructed graphic on the screen."""
--> 568 self.draw()
569 plt.show()
570
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
553 for panel in self.panels:
554 with panel.hold_trait_notifications():
--> 555 panel.draw()
556
557 def save(self, *args, **kwargs):
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
744 for p in self.plots:
745 with p.hold_trait_notifications():
--> 746 p.draw()
747
748 # Add all of the maps
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in draw(self)
1532 if self._need_redraw:
1533 if getattr(self, 'handle', None) is None:
-> 1534 self._build()
1535 self._need_redraw = False
1536
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/declarative.py in _build(self)
1592 else:
1593 self.handle.plot_parameter(location, data[ob_type][subset],
-> 1594 **field_kwargs)
1595 else:
1596 field_kwargs.pop('formatter')
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/station_plot.py in plot_parameter(self, location, parameter, formatter, **kwargs)
199 # If plot_units specified, convert the data to those units
200 plotting_units = kwargs.pop('plot_units', None)
--> 201 parameter = self._scalar_plotting_units(parameter, plotting_units)
202 if hasattr(parameter, 'units'):
203 parameter = parameter.magnitude
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/plots/station_plot.py in _scalar_plotting_units(scalar_value, plotting_units)
349 scalar_value = scalar_value.to(plotting_units)
350 else:
--> 351 raise ValueError('To convert to plotting units, units must be attached to '
352 'scalar value being converted.')
353 return scalar_value
ValueError: To convert to plotting units, units must be attached to scalar value being converted.
/Users/kgoebber/miniconda3/envs/new_main/lib/python3.7/site-packages/cartopy/mpl/geoaxes.py:388: MatplotlibDeprecationWarning:
The 'inframe' parameter of draw() was deprecated in Matplotlib 3.3 and will be removed two minor releases later. Use Axes.redraw_in_frame() instead. If any parameter follows 'inframe', they should be passed as keyword, not positionally.
inframe=inframe)
|
ValueError
|
def add_grid_arguments_from_xarray(func):
"""Fill in optional arguments like dx/dy from DataArray arguments."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number(
"x"
)
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number(
"y"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if (
"latitude" in bound_args.arguments
and bound_args.arguments["latitude"] is None
):
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
def add_grid_arguments_from_xarray(func):
"""Fill in optional arguments like dx/dy from DataArray arguments."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number(
"x"
)
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number(
"y"
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except AttributeError:
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if (
"latitude" in bound_args.arguments
and bound_args.arguments["latitude"] is None
):
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
|
https://github.com/Unidata/MetPy/issues/1548
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-5722b3ee1007> in <module>
3
4 # Compute the temperature advection at 850 hPa
----> 5 t_adv_850 = mpcalc.advection(smooth_tmpc, uwnd_850, vwnd_850)
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/xarray.py in wrapper(*args, **kwargs)
1158
1159 # Evaluate inner calculation
-> 1160 result = func(*bound_args.args, **bound_args.kwargs)
1161
1162 # Wrap output based on match and match_unit
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/kinematics.py in advection(scalar, u, v, w, dx, dy, dz, x_dim, y_dim, vertical_dim)
292 (u, dx, x_dim),
293 (v, dy, y_dim),
--> 294 (w, dz, vertical_dim)
295 )
296 if wind is not None
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/kinematics.py in <genexpr>(.0)
294 (w, dz, vertical_dim)
295 )
--> 296 if wind is not None
297 )
298
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in wrapper(f, **kwargs)
894 if 'x' in kwargs or 'delta' in kwargs:
895 # Use the usual DataArray to pint.Quantity preprocessing wrapper
--> 896 return preprocess_and_wrap()(func)(f, **kwargs)
897 elif isinstance(f, xr.DataArray):
898 # Get axis argument, defaulting to first dimension
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/xarray.py in wrapper(*args, **kwargs)
1158
1159 # Evaluate inner calculation
-> 1160 result = func(*bound_args.args, **bound_args.kwargs)
1161
1162 # Wrap output based on match and match_unit
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in first_derivative(f, axis, x, delta)
970
971 """
--> 972 n, axis, delta = _process_deriv_args(f, axis, x, delta)
973 take = make_take(n, axis)
974
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in _process_deriv_args(f, axis, x, delta)
1273 delta = np.diff(x, axis=axis)
1274 else:
-> 1275 raise ValueError('Must specify either "x" or "delta" for value positions.')
1276
1277 return n, axis, delta
ValueError: Must specify either "x" or "delta" for value positions.
|
ValueError
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number("x")
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number("y")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except (AttributeError, ValueError):
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if "latitude" in bound_args.arguments and bound_args.arguments["latitude"] is None:
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
|
def wrapper(*args, **kwargs):
bound_args = signature(func).bind(*args, **kwargs)
bound_args.apply_defaults()
# Search for DataArray with valid latitude and longitude coordinates to find grid
# deltas and any other needed parameter
dataarray_arguments = [
value
for value in bound_args.arguments.values()
if isinstance(value, xr.DataArray)
]
grid_prototype = None
for da in dataarray_arguments:
if hasattr(da.metpy, "latitude") and hasattr(da.metpy, "longitude"):
grid_prototype = da
break
# Fill in x_dim/y_dim
if (
grid_prototype is not None
and "x_dim" in bound_args.arguments
and "y_dim" in bound_args.arguments
):
try:
bound_args.arguments["x_dim"] = grid_prototype.metpy.find_axis_number("x")
bound_args.arguments["y_dim"] = grid_prototype.metpy.find_axis_number("y")
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Horizontal dimension numbers not found. Defaulting to "
"(..., Y, X) order."
)
# Fill in vertical_dim
if grid_prototype is not None and "vertical_dim" in bound_args.arguments:
try:
bound_args.arguments["vertical_dim"] = (
grid_prototype.metpy.find_axis_number("vertical")
)
except AttributeError:
# If axis number not found, fall back to default but warn.
warnings.warn(
"Vertical dimension number not found. Defaulting to (..., Z, Y, X) order."
)
# Fill in dz
if (
grid_prototype is not None
and "dz" in bound_args.arguments
and bound_args.arguments["dz"] is None
):
try:
vertical_coord = grid_prototype.metpy.vertical
bound_args.arguments["dz"] = np.diff(vertical_coord.metpy.unit_array)
except AttributeError:
# Skip, since this only comes up in advection, where dz is optional (may not
# need vertical at all)
pass
# Fill in dx/dy
if (
"dx" in bound_args.arguments
and bound_args.arguments["dx"] is None
and "dy" in bound_args.arguments
and bound_args.arguments["dy"] is None
):
if grid_prototype is not None:
bound_args.arguments["dx"], bound_args.arguments["dy"] = (
grid_deltas_from_dataarray(grid_prototype, kind="actual")
)
elif "dz" in bound_args.arguments:
# Handle advection case, allowing dx/dy to be None but dz to not be None
if bound_args.arguments["dz"] is None:
raise ValueError(
"Must provide dx, dy, and/or dz arguments or input DataArray with "
"proper coordinates."
)
else:
raise ValueError(
"Must provide dx/dy arguments or input DataArray with "
"latitude/longitude coordinates."
)
# Fill in latitude
if "latitude" in bound_args.arguments and bound_args.arguments["latitude"] is None:
if grid_prototype is not None:
bound_args.arguments["latitude"] = grid_prototype.metpy.latitude
else:
raise ValueError(
"Must provide latitude argument or input DataArray with "
"latitude/longitude coordinates."
)
return func(*bound_args.args, **bound_args.kwargs)
|
https://github.com/Unidata/MetPy/issues/1548
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-5722b3ee1007> in <module>
3
4 # Compute the temperature advection at 850 hPa
----> 5 t_adv_850 = mpcalc.advection(smooth_tmpc, uwnd_850, vwnd_850)
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/xarray.py in wrapper(*args, **kwargs)
1158
1159 # Evaluate inner calculation
-> 1160 result = func(*bound_args.args, **bound_args.kwargs)
1161
1162 # Wrap output based on match and match_unit
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/kinematics.py in advection(scalar, u, v, w, dx, dy, dz, x_dim, y_dim, vertical_dim)
292 (u, dx, x_dim),
293 (v, dy, y_dim),
--> 294 (w, dz, vertical_dim)
295 )
296 if wind is not None
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/kinematics.py in <genexpr>(.0)
294 (w, dz, vertical_dim)
295 )
--> 296 if wind is not None
297 )
298
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in wrapper(f, **kwargs)
894 if 'x' in kwargs or 'delta' in kwargs:
895 # Use the usual DataArray to pint.Quantity preprocessing wrapper
--> 896 return preprocess_and_wrap()(func)(f, **kwargs)
897 elif isinstance(f, xr.DataArray):
898 # Get axis argument, defaulting to first dimension
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/xarray.py in wrapper(*args, **kwargs)
1158
1159 # Evaluate inner calculation
-> 1160 result = func(*bound_args.args, **bound_args.kwargs)
1161
1162 # Wrap output based on match and match_unit
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in first_derivative(f, axis, x, delta)
970
971 """
--> 972 n, axis, delta = _process_deriv_args(f, axis, x, delta)
973 take = make_take(n, axis)
974
~/miniconda3/envs/new_main/lib/python3.7/site-packages/metpy/calc/tools.py in _process_deriv_args(f, axis, x, delta)
1273 delta = np.diff(x, axis=axis)
1274 else:
-> 1275 raise ValueError('Must specify either "x" or "delta" for value positions.')
1276
1277 return n, axis, delta
ValueError: Must specify either "x" or "delta" for value positions.
|
ValueError
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return temperature.units * np.insert(temperature.m, loc, interp_temp.m)
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
|
https://github.com/Unidata/MetPy/issues/1496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-d3bf39d73e06> in <module>
7 td = units.degC * np.ma.array([20, 10, -5])
8
----> 9 mpcalc.surface_based_cape_cin(pressure, t, td)
~/repos/metpy/src/metpy/xarray.py in wrapper(*args, **kwargs)
1065 kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
1066 for name, v in kwargs.items()}
-> 1067 return func(*args, **kwargs)
1068 return wrapper
1069
~/repos/metpy/src/metpy/units.py in wrapper(*args, **kwargs)
231 ' x = x * units.meter / units.second')
232 raise ValueError(msg)
--> 233 return func(*args, **kwargs)
234
235 return wrapper
~/repos/metpy/src/metpy/calc/thermo.py in surface_based_cape_cin(pressure, temperature, dewpoint)
1826 pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
1827 p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
-> 1828 return cape_cin(p, t, td, profile)
1829
1830
~/repos/metpy/src/metpy/xarray.py in wrapper(*args, **kwargs)
1065 kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
1066 for name, v in kwargs.items()}
-> 1067 return func(*args, **kwargs)
1068 return wrapper
1069
~/repos/metpy/src/metpy/units.py in wrapper(*args, **kwargs)
230 ' from metpy.units import units\n'
231 ' x = x * units.meter / units.second')
--> 232 raise ValueError(msg)
233 return func(*args, **kwargs)
234
ValueError: `cape_cin` given arguments with incorrect units: `temperature` requires "[temperature]" but given "none", `dewpoint` requires "[temperature]" but given "none"
Any variable `x` can be assigned a unit as follows:
from metpy.units import units
x = x * units.meter / units.second
|
ValueError
|
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
Returns
-------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
"""
crossings = find_intersections(
x[1:], y[1:], y.units * np.zeros_like(y[1:]), log_x=True
)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
|
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
Returns
-------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
"""
crossings = find_intersections(
x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True
)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
|
https://github.com/Unidata/MetPy/issues/1496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-8-d3bf39d73e06> in <module>
7 td = units.degC * np.ma.array([20, 10, -5])
8
----> 9 mpcalc.surface_based_cape_cin(pressure, t, td)
~/repos/metpy/src/metpy/xarray.py in wrapper(*args, **kwargs)
1065 kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
1066 for name, v in kwargs.items()}
-> 1067 return func(*args, **kwargs)
1068 return wrapper
1069
~/repos/metpy/src/metpy/units.py in wrapper(*args, **kwargs)
231 ' x = x * units.meter / units.second')
232 raise ValueError(msg)
--> 233 return func(*args, **kwargs)
234
235 return wrapper
~/repos/metpy/src/metpy/calc/thermo.py in surface_based_cape_cin(pressure, temperature, dewpoint)
1826 pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
1827 p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
-> 1828 return cape_cin(p, t, td, profile)
1829
1830
~/repos/metpy/src/metpy/xarray.py in wrapper(*args, **kwargs)
1065 kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
1066 for name, v in kwargs.items()}
-> 1067 return func(*args, **kwargs)
1068 return wrapper
1069
~/repos/metpy/src/metpy/units.py in wrapper(*args, **kwargs)
230 ' from metpy.units import units\n'
231 ' x = x * units.meter / units.second')
--> 232 raise ValueError(msg)
233 return func(*args, **kwargs)
234
ValueError: `cape_cin` given arguments with incorrect units: `temperature` requires "[temperature]" but given "none", `dewpoint` requires "[temperature]" but given "none"
Any variable `x` can be assigned a unit as follows:
from metpy.units import units
x = x * units.meter / units.second
|
ValueError
|
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
"""
# avoid circular import if basic.py ever imports something from tools.py
from .basic import height_to_pressure_std, pressure_to_height_std
# Make sure pressure is monotonically decreasing
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
if heights is not None:
heights = heights[sort_inds]
# Bound is given in pressure
if bound.dimensionality == {"[length]": -1.0, "[mass]": 1.0, "[time]": -2.0}:
# If the bound is in the pressure data, we know the pressure bound exactly
if bound in pressure:
bound_pressure = bound
# If we have heights, we know the exact height value, otherwise return standard
# atmosphere height for the pressure
if heights is not None:
bound_height = heights[pressure == bound_pressure]
else:
bound_height = pressure_to_height_std(bound_pressure)
# If bound is not in the data, return the nearest or interpolated values
else:
if interpolate:
bound_pressure = bound # Use the user specified bound
if heights is not None: # Interpolate heights from the height data
bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
else: # If not heights given, use the standard atmosphere
bound_height = pressure_to_height_std(bound_pressure)
else: # No interpolation, find the closest values
idx = (np.abs(pressure - bound)).argmin()
bound_pressure = pressure[idx]
if heights is not None:
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
# Bound is given in height
elif bound.dimensionality == {"[length]": 1.0}:
# If there is height data, see if we have the bound or need to interpolate/find nearest
if heights is not None:
if bound in heights: # Bound is in the height data
bound_height = bound
bound_pressure = pressure[heights == bound]
else: # Bound is not in the data
if interpolate:
bound_height = bound
# Need to cast back to the input type since interp (up to at least numpy
# 1.13 always returns float64. This can cause upstream users problems,
# resulting in something like np.append() to upcast.
bound_pressure = (
np.interp(np.atleast_1d(bound.m), heights.m, pressure.m).astype(
result_type(bound)
)
* pressure.units
)
else:
idx = (np.abs(heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else: # Don't have heights, so assume a standard atmosphere
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
# If interpolation is on, this is all we need, if not, we need to go back and
# find the pressure closest to this and refigure the bounds
if not interpolate:
idx = (np.abs(pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
# Bound has invalid units
else:
raise ValueError("Bound must be specified in units of length or pressure.")
# If the bound is out of the range of the data, we shouldn't extrapolate
if not (
_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)
and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)
):
raise ValueError("Specified bound is outside pressure range.")
if heights is not None and not (
_less_or_close(bound_height, np.nanmax(heights.m) * heights.units)
and _greater_or_close(bound_height, np.nanmin(heights.m) * heights.units)
):
raise ValueError("Specified bound is outside height range.")
return bound_pressure, bound_height
|
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
"""Calculate the bounding pressure and height in a layer.
Given pressure, optional heights, and a bound, return either the closest pressure/height
or interpolated pressure/height. If no heights are provided, a standard atmosphere
([NOAA1976]_) is assumed.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressures
bound : `pint.Quantity`
Bound to retrieve (in pressure or height)
heights : `pint.Quantity`, optional
Atmospheric heights associated with the pressure levels. Defaults to using
heights calculated from ``pressure`` assuming a standard atmosphere.
interpolate : boolean, optional
Interpolate the bound or return the nearest. Defaults to True.
Returns
-------
`pint.Quantity`
The bound pressure and height.
"""
# Make sure pressure is monotonically decreasing
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
if heights is not None:
heights = heights[sort_inds]
# Bound is given in pressure
if bound.dimensionality == {"[length]": -1.0, "[mass]": 1.0, "[time]": -2.0}:
# If the bound is in the pressure data, we know the pressure bound exactly
if bound in pressure:
bound_pressure = bound
# If we have heights, we know the exact height value, otherwise return standard
# atmosphere height for the pressure
if heights is not None:
bound_height = heights[pressure == bound_pressure]
else:
bound_height = pressure_to_height_std(bound_pressure)
# If bound is not in the data, return the nearest or interpolated values
else:
if interpolate:
bound_pressure = bound # Use the user specified bound
if heights is not None: # Interpolate heights from the height data
bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
else: # If not heights given, use the standard atmosphere
bound_height = pressure_to_height_std(bound_pressure)
else: # No interpolation, find the closest values
idx = (np.abs(pressure - bound)).argmin()
bound_pressure = pressure[idx]
if heights is not None:
bound_height = heights[idx]
else:
bound_height = pressure_to_height_std(bound_pressure)
# Bound is given in height
elif bound.dimensionality == {"[length]": 1.0}:
# If there is height data, see if we have the bound or need to interpolate/find nearest
if heights is not None:
if bound in heights: # Bound is in the height data
bound_height = bound
bound_pressure = pressure[heights == bound]
else: # Bound is not in the data
if interpolate:
bound_height = bound
# Need to cast back to the input type since interp (up to at least numpy
# 1.13 always returns float64. This can cause upstream users problems,
# resulting in something like np.append() to upcast.
bound_pressure = (
np.interp(np.atleast_1d(bound.m), heights.m, pressure.m).astype(
result_type(bound)
)
* pressure.units
)
else:
idx = (np.abs(heights - bound)).argmin()
bound_pressure = pressure[idx]
bound_height = heights[idx]
else: # Don't have heights, so assume a standard atmosphere
bound_height = bound
bound_pressure = height_to_pressure_std(bound)
# If interpolation is on, this is all we need, if not, we need to go back and
# find the pressure closest to this and refigure the bounds
if not interpolate:
idx = (np.abs(pressure - bound_pressure)).argmin()
bound_pressure = pressure[idx]
bound_height = pressure_to_height_std(bound_pressure)
# Bound has invalid units
else:
raise ValueError("Bound must be specified in units of length or pressure.")
# If the bound is out of the range of the data, we shouldn't extrapolate
if not (
_greater_or_close(bound_pressure, np.nanmin(pressure.m) * pressure.units)
and _less_or_close(bound_pressure, np.nanmax(pressure.m) * pressure.units)
):
raise ValueError("Specified bound is outside pressure range.")
if heights is not None and not (
_less_or_close(bound_height, np.nanmax(heights.m) * heights.units)
and _greater_or_close(bound_height, np.nanmin(heights.m) * heights.units)
):
raise ValueError("Specified bound is outside height range.")
return bound_pressure, bound_height
|
https://github.com/Unidata/MetPy/issues/1211
|
from metpy.calc import wind_direction
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/akrherz/projects/MetPy/src/metpy/calc/__init__.py", line 6, in <module>
from .basic import * # noqa: F403
File "/home/akrherz/projects/MetPy/src/metpy/calc/basic.py", line 21, in <module>
from .tools import first_derivative
File "/home/akrherz/projects/MetPy/src/metpy/calc/tools.py", line 22, in <module>
from .basic import height_to_pressure_std, pressure_to_height_std
ImportError: cannot import name 'height_to_pressure_std'
|
ImportError
|
def lfc(
pressure,
temperature,
dewpt,
parcel_temperature_profile=None,
dewpt_start=None,
which="top",
):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature. If this intersection occurs below the LCL,
the LFC is determined to be the same as the LCL, based upon the conditions
set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically
to saturation before it can freely rise.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpt_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', and 'all'.
Default is the 'top' (lowest pressure) LFC.
Returns
-------
`pint.Quantity`
The LFC pressure, or array of same if which='all'
`pint.Quantity`
The LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpt_start is None:
dewpt_start = dewpt[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(
parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m
):
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
log_x=True,
)
else:
x, y = find_intersections(
pressure,
parcel_temperature_profile,
temperature,
direction="increasing",
log_x=True,
)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpt_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="decreasing",
log_x=True,
)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which)
|
def lfc(
pressure,
temperature,
dewpt,
parcel_temperature_profile=None,
dewpt_start=None,
which="top",
):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpt_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', and 'all'.
Default is the 'top' (lowest pressure) LFC.
Returns
-------
`pint.Quantity`
The LFC pressure, or array of same if which='all'
`pint.Quantity`
The LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpt_start is None:
dewpt_start = dewpt[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(parcel_temperature_profile[0].m, temperature[0].m):
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
log_x=True,
)
else:
x, y = find_intersections(
pressure,
parcel_temperature_profile,
temperature,
direction="increasing",
log_x=True,
)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpt_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="decreasing",
log_x=True,
)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which)
|
https://github.com/Unidata/MetPy/issues/1190
|
Traceback (most recent call last):
File "trouble_sounding.py", line 18, in <module>
filtered['dewpoint'].values * units.degC)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1756, in surface_based_cape_cin
return cape_cin(p, t, td, profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1430, in cape_cin
parcel_temperature_profile=parcel_profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 451, in lfc
if np.min(el_pres) > this_lcl[0]:
File "<__array_function__ internals>", line 6, in amin
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 2746, in amin
keepdims=keepdims, initial=initial, where=where)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 88, in _wrapreduction
return reduction(axis=axis, out=out, **passkwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/pint/quantity.py", line 1357, in __numpy_method_wrap
value = func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/_methods.py", line 34, in _amin
return umr_minimum(a, axis, None, out, keepdims, initial, where)
ValueError: zero-size array to reduction operation minimum which has no identity
|
ValueError
|
def cape_cin(pressure, temperature, dewpt, parcel_profile):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of
the measured temperature profile and parcel profile are linearly interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpt : `pint.Quantity`
The atmospheric dewpoint corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE).
`pint.Quantity`
Convective INhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
"""
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(
pressure, temperature, dewpt, parcel_temperature_profile=parcel_profile
)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units("J/kg"), 0 * units("J/kg")
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(
pressure, temperature, dewpt, parcel_temperature_profile=parcel_profile
)
# No EL and we use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(
units("J/kg")
)
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(
units("J/kg")
)
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > 0 * units("J/kg"):
cin = 0 * units("J/kg")
return cape, cin
|
def cape_cin(pressure, temperature, dewpt, parcel_profile):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of
the measured temperature profile and parcel profile are linearly interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpt : `pint.Quantity`
The atmospheric dewpoint corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE).
`pint.Quantity`
Convective INhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
"""
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(
pressure, temperature, dewpt, parcel_temperature_profile=parcel_profile
)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units("J/kg"), 0 * units("J/kg")
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(
pressure, temperature, dewpt, parcel_temperature_profile=parcel_profile
)
# No EL and we use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(
units("J/kg")
)
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(
units("J/kg")
)
return cape, cin
|
https://github.com/Unidata/MetPy/issues/1190
|
Traceback (most recent call last):
File "trouble_sounding.py", line 18, in <module>
filtered['dewpoint'].values * units.degC)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1756, in surface_based_cape_cin
return cape_cin(p, t, td, profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1430, in cape_cin
parcel_temperature_profile=parcel_profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 451, in lfc
if np.min(el_pres) > this_lcl[0]:
File "<__array_function__ internals>", line 6, in amin
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 2746, in amin
keepdims=keepdims, initial=initial, where=where)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 88, in _wrapreduction
return reduction(axis=axis, out=out, **passkwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/pint/quantity.py", line 1357, in __numpy_method_wrap
value = func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/numpy/core/_methods.py", line 34, in _amin
return umr_minimum(a, axis, None, out, keepdims, initial, where)
ValueError: zero-size array to reduction operation minimum which has no identity
|
ValueError
|
def lcl(pressure, temperature, dewpt, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpt`,
and `pressure`.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The LCL pressure
`pint.Quantity`
The LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
td = dewpoint(vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1.0 / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)
fp = so.fixed_point(
_lcl_iter,
pressure.m,
args=(pressure.m, w, temperature),
xtol=eps,
maxiter=max_iters,
)
lcl_p = fp * pressure.units
# Conditional needed due to precision error with np.log in dewpoint.
# Causes issues with parcel_profile_with_lcl if removed. Issue #1187
if np.isclose(lcl_p, pressure):
lcl_p = pressure
return lcl_p, dewpoint(vapor_pressure(lcl_p, w)).to(temperature.units)
|
def lcl(pressure, temperature, dewpt, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpt`,
and `pressure`.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The LCL pressure
`pint.Quantity`
The LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
td = dewpoint(vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1.0 / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)
fp = so.fixed_point(
_lcl_iter,
pressure.m,
args=(pressure.m, w, temperature),
xtol=eps,
maxiter=max_iters,
)
lcl_p = fp * pressure.units
return lcl_p, dewpoint(vapor_pressure(lcl_p, w)).to(temperature.units)
|
https://github.com/Unidata/MetPy/issues/1187
|
/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/interpolate/one_dimension.py:144: UserWarning: Interpolation point out of data bounds encountered
warnings.warn('Interpolation point out of data bounds encountered')
Traceback (most recent call last):
File "trouble_sounding.py", line 17, in <module>
filtered['dewpoint'].values * units.degC)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1756, in surface_based_cape_cin
return cape_cin(p, t, td, profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 1430, in cape_cin
parcel_temperature_profile=parcel_profile)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 425, in lfc
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpt_start)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/xarray.py", line 571, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/units.py", line 312, in wrapper
return func(*args, **kwargs)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/metpy/calc/thermo.py", line 360, in lcl
xtol=eps, maxiter=max_iters)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/scipy/optimize/minpack.py", line 903, in fixed_point
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
File "/opt/miniconda3/envs/prod/lib/python3.6/site-packages/scipy/optimize/minpack.py", line 857, in _fixed_point_helper
raise RuntimeError(msg)
RuntimeError: Failed to converge after 50 iterations, value is nan
|
RuntimeError
|
def interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Handle units
x, xp = _strip_matching_units(x, xp)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
sort_args = np.argsort(xp, axis=axis)
sort_x = np.argsort(x)
# indices for sorting
sorter = broadcast_indices(xp, sort_args, ndim, axis)
# sort xp
xp = xp[sorter]
# Ensure pressure in increasing order
variables = [arr[sorter] for arr in args]
# Make x broadcast with xp
x_array = x[sort_x]
expand = [np.newaxis] * ndim
expand[axis] = slice(None)
x_array = x_array[tuple(expand)]
# Calculate value above interpolated value
minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
minv2 = np.copy(minv)
# If fill_value is none and data is out of bounds, raise value error
if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
raise ValueError("Interpolation point out of data bounds encountered")
# Warn if interpolated values are outside data bounds, will make these the values
# at end of data range.
if np.max(minv) == xp.shape[axis]:
warnings.warn("Interpolation point out of data bounds encountered")
minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
if np.min(minv) == 0:
minv2[minv == 0] = 1
# Get indices for broadcasting arrays
above = broadcast_indices(xp, minv2, ndim, axis)
below = broadcast_indices(xp, minv2 - 1, ndim, axis)
if np.any(x_array < xp[below]):
warnings.warn("Interpolation point out of data bounds encountered")
# Create empty output list
ret = []
# Calculate interpolation for each variable
for var in variables:
# Var needs to be on the *left* of the multiply to ensure that if it's a pint
# Quantity, it gets to control the operation--at least until we make sure
# masked arrays and pint play together better. See https://github.com/hgrecco/pint#633
var_interp = var[below] + (var[above] - var[below]) * (
(x_array - xp[below]) / (xp[above] - xp[below])
)
# Set points out of bounds to fill value.
var_interp[minv == xp.shape[axis]] = fill_value
var_interp[x_array < xp[below]] = fill_value
# Check for input points in decreasing order and return output to match.
if x[0] > x[-1]:
var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
# Output to list
ret.append(var_interp)
if len(ret) == 1:
return ret[0]
else:
return ret
|
def interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
sort_args = np.argsort(xp, axis=axis)
sort_x = np.argsort(x)
# indices for sorting
sorter = broadcast_indices(xp, sort_args, ndim, axis)
# sort xp
xp = xp[sorter]
# Ensure pressure in increasing order
variables = [arr[sorter] for arr in args]
# Make x broadcast with xp
x_array = x[sort_x]
expand = [np.newaxis] * ndim
expand[axis] = slice(None)
x_array = x_array[tuple(expand)]
# Calculate value above interpolated value
minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
minv2 = np.copy(minv)
# If fill_value is none and data is out of bounds, raise value error
if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
raise ValueError("Interpolation point out of data bounds encountered")
# Warn if interpolated values are outside data bounds, will make these the values
# at end of data range.
if np.max(minv) == xp.shape[axis]:
warnings.warn("Interpolation point out of data bounds encountered")
minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
if np.min(minv) == 0:
minv2[minv == 0] = 1
# Get indices for broadcasting arrays
above = broadcast_indices(xp, minv2, ndim, axis)
below = broadcast_indices(xp, minv2 - 1, ndim, axis)
if np.any(x_array < xp[below]):
warnings.warn("Interpolation point out of data bounds encountered")
# Create empty output list
ret = []
# Calculate interpolation for each variable
for var in variables:
# Var needs to be on the *left* of the multiply to ensure that if it's a pint
# Quantity, it gets to control the operation--at least until we make sure
# masked arrays and pint play together better. See https://github.com/hgrecco/pint#633
var_interp = var[below] + (var[above] - var[below]) * (
(x_array - xp[below]) / (xp[above] - xp[below])
)
# Set points out of bounds to fill value.
var_interp[minv == xp.shape[axis]] = fill_value
var_interp[x_array < xp[below]] = fill_value
# Check for input points in decreasing order and return output to match.
if x[0] > x[-1]:
var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
# Output to list
ret.append(var_interp)
if len(ret) == 1:
return ret[0]
else:
return ret
|
https://github.com/Unidata/MetPy/issues/997
|
/home/markmuetz/anaconda3/envs/metpy_v10.0_test_minimal/lib/python3.6/site-packages/pint/quantity.py:1377: UnitStrippedWarning: The unit of the quantity is stripped.
warnings.warn("The unit of the quantity is stripped.", UnitStrippedWarning)
Traceback (most recent call last):
File "cape_calc.py", line 92, in <module>
print(mpcalc.surface_based_cape_cin(p_profile * units('Pa'), T_profile * units('K'), Td_profile * units('degC')))
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/units.py", line 305, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 1652, in surface_based_cape_cin
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/units.py", line 305, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 514, in parcel_profile_with_lcl
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 552, in _insert_lcl_level
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/anaconda3/envs/metpy_v10.0_test_minimal/lib/python3.6/site-packages/pint/registry_helpers.py", line 194, in wrapper
result = func(*new_values, **kw)
File "/home/markmuetz/projects/MetPy/metpy/interpolate/one_dimension.py", line 119, in interpolate_1d
variables = [arr[sorter] for arr in args]
File "/home/markmuetz/projects/MetPy/metpy/interpolate/one_dimension.py", line 119, in <listcomp>
variables = [arr[sorter] for arr in args]
TypeError: tuple indices must be integers or slices, not tuple
|
TypeError
|
def log_interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with logarithmic x-scale over a specified axis.
Interpolation on a logarithmic x-scale for interpolation values in pressure coordintates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
>>> y_log = np.log(x_log) * 2 + 3
>>> x_interp = np.array([5e3, 5e4, 5e5])
>>> metpy.calc.log_interp(x_interp, x_log, y_log)
array([20.03438638, 24.63955657, 29.24472675])
Notes
-----
xp and args must be the same shape.
"""
# Pull out kwargs
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Handle units
x, xp = _strip_matching_units(x, xp)
# Log x and xp
log_x = np.log(x)
log_xp = np.log(xp)
return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)
|
def log_interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with logarithmic x-scale over a specified axis.
Interpolation on a logarithmic x-scale for interpolation values in pressure coordintates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
>>> y_log = np.log(x_log) * 2 + 3
>>> x_interp = np.array([5e3, 5e4, 5e5])
>>> metpy.calc.log_interp(x_interp, x_log, y_log)
array([20.03438638, 24.63955657, 29.24472675])
Notes
-----
xp and args must be the same shape.
"""
# Pull out kwargs
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Log x and xp
log_x = np.log(x)
log_xp = np.log(xp)
return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)
|
https://github.com/Unidata/MetPy/issues/997
|
/home/markmuetz/anaconda3/envs/metpy_v10.0_test_minimal/lib/python3.6/site-packages/pint/quantity.py:1377: UnitStrippedWarning: The unit of the quantity is stripped.
warnings.warn("The unit of the quantity is stripped.", UnitStrippedWarning)
Traceback (most recent call last):
File "cape_calc.py", line 92, in <module>
print(mpcalc.surface_based_cape_cin(p_profile * units('Pa'), T_profile * units('K'), Td_profile * units('degC')))
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/units.py", line 305, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 1652, in surface_based_cape_cin
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/units.py", line 305, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 514, in parcel_profile_with_lcl
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
File "/home/markmuetz/projects/MetPy/metpy/calc/thermo.py", line 552, in _insert_lcl_level
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
File "/home/markmuetz/projects/MetPy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/markmuetz/anaconda3/envs/metpy_v10.0_test_minimal/lib/python3.6/site-packages/pint/registry_helpers.py", line 194, in wrapper
result = func(*new_values, **kw)
File "/home/markmuetz/projects/MetPy/metpy/interpolate/one_dimension.py", line 119, in interpolate_1d
variables = [arr[sorter] for arr in args]
File "/home/markmuetz/projects/MetPy/metpy/interpolate/one_dimension.py", line 119, in <listcomp>
variables = [arr[sorter] for arr in args]
TypeError: tuple indices must be integers or slices, not tuple
|
TypeError
|
def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
temperature = temperature.to("degC")
parcel_temperature_profile = parcel_temperature_profile.to("degC")
# The parcel profile and data have the same first data point, so we ignore
# that point to get the real first intersection for the LFC calculation.
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], temperature[0], dewpt[0])
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
return np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
x, y = this_lcl
return x, y
# Otherwise, make select first candidate LFC above the LCL
else:
x = x[idx]
y = y[idx]
return x[0], y[0]
|
def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
temperature = temperature.to("degC")
parcel_temperature_profile = parcel_temperature_profile.to("degC")
# The parcel profile and data have the same first data point, so we ignore
# that point to get the real first intersection for the LFC calculation.
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], temperature[0], dewpt[0])
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
return np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists and is not LCL. Make sure it is above the LCL.
else:
idx = x < lcl(pressure[0], temperature[0], dewpt[0])[0]
x = x[idx]
y = y[idx]
return x[0], y[0]
|
https://github.com/Unidata/MetPy/issues/945
|
0.9.1+6.gb7f97991
1024.95703125 hectopascal 5.743482661750363 degC 2.08279974748133 degC
[278.89348266 278.24322133 277.35755297 276.22928809 274.81361678 273.52555096 272.08325952 270.57324142 269.10199475 267.56801846 265.96421247 263.5399307 260.21335861 256.7576317 253.17725629 249.48027885 245.67713199 241.78102546 237.80684667 233.77057953 229.68754004 225.57206704 221.43686749 217.29167908 213.14477576 209.0024604 204.86851485 200.74607368 196.63681225 192.54139059 188.45988434 184.39212913 180.33722545 176.29398272 172.27346048 168.31396424 164.44241275 160.65710232 156.95530012 153.33548532 149.79516513 146.33278761 142.94622388 139.63343896 136.39268027 133.22221317 130.12009053 127.08448424 124.11358965] kelvin
49 49 49 49
Traceback (most recent call last):
File "lfc_bug.py", line 47, in <module>
mllfc_p, mllfc_T = mpcalc.lfc(P, T, Td, parcel_temperature_profile=ml_profile)
File "/home/decker/src/git_repos/metpy/metpy/xarray.py", line 381, in wrapper
return func(*args, **kwargs)
File "/home/decker/src/git_repos/metpy/metpy/units.py", line 305, in wrapper
return func(*args, **kwargs)
File "/home/decker/src/git_repos/metpy/metpy/calc/thermo.py", line 383, in lfc
return x[0], y[0]
File "/home/decker/local/anaconda3/envs/devel/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def storm_relative_helicity(
u,
v,
heights,
depth,
bottom=0 * units.m,
storm_u=0 * units("m/s"),
storm_v=0 * units("m/s"),
):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
"""
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = (
storm_relative_u[1:] * storm_relative_v[:-1]
- storm_relative_u[:-1] * storm_relative_v[1:]
)
# Need to manually check for masked value because sum() on masked array with non-default
# mask will return a masked value rather than 0. See numpy/numpy#11736
positive_srh = int_layers[int_layers.magnitude > 0.0].sum()
if np.ma.is_masked(positive_srh):
positive_srh = 0.0 * units("meter**2 / second**2")
negative_srh = int_layers[int_layers.magnitude < 0.0].sum()
if np.ma.is_masked(negative_srh):
negative_srh = 0.0 * units("meter**2 / second**2")
return (
positive_srh.to("meter ** 2 / second ** 2"),
negative_srh.to("meter ** 2 / second ** 2"),
(positive_srh + negative_srh).to("meter ** 2 / second ** 2"),
)
|
def storm_relative_helicity(
u,
v,
heights,
depth,
bottom=0 * units.m,
storm_u=0 * units("m/s"),
storm_v=0 * units("m/s"),
):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
"""
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = (
storm_relative_u[1:] * storm_relative_v[:-1]
- storm_relative_u[:-1] * storm_relative_v[1:]
)
positive_srh = int_layers[int_layers.magnitude > 0.0].sum()
negative_srh = int_layers[int_layers.magnitude < 0.0].sum()
return (
positive_srh.to("meter ** 2 / second ** 2"),
negative_srh.to("meter ** 2 / second ** 2"),
(positive_srh + negative_srh).to("meter ** 2 / second ** 2"),
)
|
https://github.com/Unidata/MetPy/issues/902
|
Traceback (most recent call last):
File "cal_sounding_data.py", line 360, in <module>
newset_dic[k] = specStationSounding(k,dataset_dic)
File "cal_sounding_data.py", line 284, in specStationSounding
result_dic = soundingCalculation(num,p,t,td,ws,wd)
File "cal_sounding_data.py", line 170, in soundingCalculation
lfc_p, lfc_T = mpcalc.lfc(P,T,Td)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/xarray.py", line 138, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/units.py", line 290, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/calc/thermo.py", line 378, in lfc
return x[0], y[0]
File "/anaconda2/envs/py3/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
temperature = temperature.to("degC")
parcel_temperature_profile = parcel_temperature_profile.to("degC")
# The parcel profile and data have the same first data point, so we ignore
# that point to get the real first intersection for the LFC calculation.
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
if np.all(_less_or_close(parcel_temperature_profile, temperature)):
# LFC doesn't exist
return np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = lcl(pressure[0], temperature[0], dewpt[0])
return x, y
# LFC exists and is not LCL. Make sure it is above the LCL.
else:
idx = x < lcl(pressure[0], temperature[0], dewpt[0])[0]
x = x[idx]
y = y[idx]
return x[0], y[0]
|
def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
parcel_temperature_profile = parcel_profile(
pressure, temperature[0], dewpt[0]
).to("degC")
# The parcel profile and data have the same first data point, so we ignore
# that point to get the real first intersection for the LFC calculation.
x, y = find_intersections(
pressure[1:],
parcel_temperature_profile[1:],
temperature[1:],
direction="increasing",
)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
if np.all(_less_or_close(parcel_temperature_profile, temperature)):
# LFC doesn't exist
return np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = lcl(pressure[0], temperature[0], dewpt[0])
return x, y
# LFC exists and is not LCL. Make sure it is above the LCL.
else:
idx = x < lcl(pressure[0], temperature[0], dewpt[0])[0]
x = x[idx]
y = y[idx]
return x[0], y[0]
|
https://github.com/Unidata/MetPy/issues/902
|
Traceback (most recent call last):
File "cal_sounding_data.py", line 360, in <module>
newset_dic[k] = specStationSounding(k,dataset_dic)
File "cal_sounding_data.py", line 284, in specStationSounding
result_dic = soundingCalculation(num,p,t,td,ws,wd)
File "cal_sounding_data.py", line 170, in soundingCalculation
lfc_p, lfc_T = mpcalc.lfc(P,T,Td)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/xarray.py", line 138, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/units.py", line 290, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/calc/thermo.py", line 378, in lfc
return x[0], y[0]
File "/anaconda2/envs/py3/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def el(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity, pint.Quantity`
The EL pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
temperature = temperature.to("degC")
parcel_temperature_profile = parcel_temperature_profile.to("degC")
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Otherwise the last intersection (as long as there is one) is the EL
x, y = find_intersections(
pressure[1:], parcel_temperature_profile[1:], temperature[1:]
)
if len(x) > 0:
return x[-1], y[-1]
else:
return np.nan * pressure.units, np.nan * temperature.units
|
def el(pressure, temperature, dewpt, parcel_temperature_profile=None):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity, pint.Quantity`
The EL pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
parcel_temperature_profile = parcel_profile(
pressure, temperature[0], dewpt[0]
).to("degC")
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Otherwise the last intersection (as long as there is one) is the EL
x, y = find_intersections(
pressure[1:], parcel_temperature_profile[1:], temperature[1:]
)
if len(x) > 0:
return x[-1], y[-1]
else:
return np.nan * pressure.units, np.nan * temperature.units
|
https://github.com/Unidata/MetPy/issues/902
|
Traceback (most recent call last):
File "cal_sounding_data.py", line 360, in <module>
newset_dic[k] = specStationSounding(k,dataset_dic)
File "cal_sounding_data.py", line 284, in specStationSounding
result_dic = soundingCalculation(num,p,t,td,ws,wd)
File "cal_sounding_data.py", line 170, in soundingCalculation
lfc_p, lfc_T = mpcalc.lfc(P,T,Td)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/xarray.py", line 138, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/units.py", line 290, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/calc/thermo.py", line 378, in lfc
return x[0], y[0]
File "/anaconda2/envs/py3/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def parcel_profile(pressure, temperature, dewpt):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpt`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
"""
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpt)
return concatenate((t_l, t_u))
|
def parcel_profile(pressure, temperature, dewpt):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpt`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
"""
# Find the LCL
lcl_pressure, _ = lcl(pressure[0], temperature, dewpt)
lcl_pressure = lcl_pressure.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= lcl_pressure], lcl_pressure))
t1 = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure), lcl_pressure.m):
return t1[:-1]
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((lcl_pressure, pressure[pressure < lcl_pressure]))
t2 = moist_lapse(press_upper, t1[-1]).to(t1.units)
# Return LCL *without* the LCL point
return concatenate((t1[:-1], t2[1:]))
|
https://github.com/Unidata/MetPy/issues/902
|
Traceback (most recent call last):
File "cal_sounding_data.py", line 360, in <module>
newset_dic[k] = specStationSounding(k,dataset_dic)
File "cal_sounding_data.py", line 284, in specStationSounding
result_dic = soundingCalculation(num,p,t,td,ws,wd)
File "cal_sounding_data.py", line 170, in soundingCalculation
lfc_p, lfc_T = mpcalc.lfc(P,T,Td)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/xarray.py", line 138, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/units.py", line 290, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/calc/thermo.py", line 378, in lfc
return x[0], y[0]
File "/anaconda2/envs/py3/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
linearly interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation.
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE).
`pint.Quantity`
Surface based Convective INhibition (CIN).
See Also
--------
cape_cin, parcel_profile
"""
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
|
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
linearly interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation.
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE).
`pint.Quantity`
Surface based Convective INhibition (CIN).
See Also
--------
cape_cin, parcel_profile
"""
profile = parcel_profile(pressure, temperature[0], dewpoint[0])
return cape_cin(pressure, temperature, dewpoint, profile)
|
https://github.com/Unidata/MetPy/issues/902
|
Traceback (most recent call last):
File "cal_sounding_data.py", line 360, in <module>
newset_dic[k] = specStationSounding(k,dataset_dic)
File "cal_sounding_data.py", line 284, in specStationSounding
result_dic = soundingCalculation(num,p,t,td,ws,wd)
File "cal_sounding_data.py", line 170, in soundingCalculation
lfc_p, lfc_T = mpcalc.lfc(P,T,Td)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/xarray.py", line 138, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/units.py", line 290, in wrapper
return func(*args, **kwargs)
File "/anaconda2/envs/py3/lib/python3.6/site-packages/metpy/calc/thermo.py", line 378, in lfc
return x[0], y[0]
File "/anaconda2/envs/py3/lib/python3.6/site-packages/pint/quantity.py", line 1281, in __getitem__
value = self._magnitude[key]
IndexError: index 0 is out of bounds for axis 0 with size 0
|
IndexError
|
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for theta levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop("tmpk_out", False)
max_iters = kwargs.pop("max_iters", 50)
eps = kwargs.pop("eps", 1e-6)
axis = kwargs.pop("axis", 0)
bottom_up_search = kwargs.pop("bottom_up_search", True)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to("hPa")
temperature = temperature.to("kelvin")
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
theta_levels = np.asanyarray(theta_levels.to("kelvin")).reshape(-1)
isentlevels = theta_levels[np.argsort(theta_levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = kappa.m_as("dimensionless")
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError("Input theta level out of data bounds")
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = P0**ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(
pres_theta.m, theta_levels, axis, from_below=bottom_up_search
)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(
_isen_iter,
isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps,
maxiter=max_iters,
)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interp(
isentlevels, pres_theta.m, *(arr[sorter] for arr in args), axis=axis
)
if len(args) > 1:
ret.extend(others)
else:
ret.append(others)
return ret
|
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop("tmpk_out", False)
max_iters = kwargs.pop("max_iters", 50)
eps = kwargs.pop("eps", 1e-6)
axis = kwargs.pop("axis", 0)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to("hPa")
temperature = temperature.to("kelvin")
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
pres = pres[slices]
pres = np.broadcast_to(pres, temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
theta_levels = np.asanyarray(theta_levels.to("kelvin")).reshape(-1)
sort_isentlevs = np.argsort(theta_levels)
tmpk = temperature[sorter]
isentlevels = theta_levels[sort_isentlevs]
# Make the desired isentropic levels the same shape as temperature
isentlevs_nd = isentlevels
isentlevs_nd = isentlevs_nd[slices]
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevs_nd, shape)
# exponent to Poisson's Equation, which is imported above
ka = kappa.to("dimensionless").m
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError("Input theta level out of data bounds")
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = P0**ka
# index values for each point for the pressure level nearest to the desired theta level
minv = np.apply_along_axis(np.searchsorted, axis, pres_theta.m, theta_levels)
# Create index values for broadcasting arrays
above = broadcast_indices(tmpk, minv, ndim, axis)
below = broadcast_indices(tmpk, minv - 1, ndim, axis)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
first_guess = 0.5 * (log_p[above] + log_p[below])
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(
_isen_iter,
first_guess,
args=(isentlevs_nd, ka, a, b, pok.m),
xtol=eps,
maxiter=max_iters,
)
# get back pressure and assign nan for values with pressure greater than 1000 hPa
isentprs = np.exp(log_p_solved)
isentprs[isentprs > np.max(pressure.m)] = np.nan
# create list for storing output data
ret = []
ret.append(isentprs * units.hPa)
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((P0.m / isentprs) ** ka)) * units.kelvin)
# check to see if any additional arguments were given, if so, interpolate to
# new isentropic levels
try:
args[0]
except IndexError:
return ret
else:
# do an interpolation for each additional argument
for arr in args:
var = arr[sorter]
# interpolate to isentropic levels and add to temporary output array
arg_out = interp(isentlevels, pres_theta.m, var, axis=axis)
ret.append(arg_out)
# output values as a list
return ret
|
https://github.com/Unidata/MetPy/issues/769
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-5-859d6855c05e> in <module>()
----> 1 isent_anal = mcalc.isentropic_interpolation(isentlevs,lev,tmp,spech,uwnd,vwnd,hgt,tmpk_out=True)
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/metpy/calc/thermo.pyc in isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs)
1185
1186 # calculate constants for the interpolation
-> 1187 a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
1188 b = tmpk.m[above] - a * log_p[above]
1189
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/numpy/ma/core.pyc in __getitem__(self, indx)
3195 # mask of being reshaped if it hasn't been set up properly yet
3196 # So it's easier to stick to the current version
-> 3197 dout = self.data[indx]
3198 _mask = self._mask
3199
IndexError: index 29 is out of bounds for axis 0 with size 29
---------------------------------------------------------------------------
|
IndexError
|
def interp(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
sort_args = np.argsort(xp, axis=axis)
sort_x = np.argsort(x)
# indices for sorting
sorter = broadcast_indices(xp, sort_args, ndim, axis)
# sort xp
xp = xp[sorter]
# Ensure pressure in increasing order
variables = [arr[sorter] for arr in args]
# Make x broadcast with xp
x_array = x[sort_x]
expand = [np.newaxis] * ndim
expand[axis] = slice(None)
x_array = x_array[expand]
# Calculate value above interpolated value
minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
minv2 = np.copy(minv)
# If fill_value is none and data is out of bounds, raise value error
if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
raise ValueError("Interpolation point out of data bounds encountered")
# Warn if interpolated values are outside data bounds, will make these the values
# at end of data range.
if np.max(minv) == xp.shape[axis]:
warnings.warn("Interpolation point out of data bounds encountered")
minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
if np.min(minv) == 0:
minv2[minv == 0] = 1
# Get indices for broadcasting arrays
above = broadcast_indices(xp, minv2, ndim, axis)
below = broadcast_indices(xp, minv2 - 1, ndim, axis)
if np.any(x_array < xp[below]):
warnings.warn("Interpolation point out of data bounds encountered")
# Create empty output list
ret = []
# Calculate interpolation for each variable
for var in variables:
# Var needs to be on the *left* of the multiply to ensure that if it's a pint
# Quantity, it gets to control the operation--at least until we make sure
# masked arrays and pint play together better. See https://github.com/hgrecco/pint#633
var_interp = var[below] + (var[above] - var[below]) * (
(x_array - xp[below]) / (xp[above] - xp[below])
)
# Set points out of bounds to fill value.
var_interp[minv == xp.shape[axis]] = fill_value
var_interp[x_array < xp[below]] = fill_value
# Check for input points in decreasing order and return output to match.
if x[0] > x[-1]:
var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
# Output to list
ret.append(var_interp)
if len(ret) == 1:
return ret[0]
else:
return ret
|
def interp(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop("fill_value", np.nan)
axis = kwargs.pop("axis", 0)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
sort_args = np.argsort(xp, axis=axis)
sort_x = np.argsort(x)
# indices for sorting
sorter = broadcast_indices(xp, sort_args, ndim, axis)
# sort xp
xp = xp[sorter]
# Ensure pressure in increasing order
variables = [arr[sorter] for arr in args]
# Make x broadcast with xp
x_array = x[sort_x]
expand = [np.newaxis] * ndim
expand[axis] = slice(None)
x_array = x_array[expand]
# Calculate value above interpolated value
minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
minv2 = np.copy(minv)
# If fill_value is none and data is out of bounds, raise value error
if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
raise ValueError("Interpolation point out of data bounds encountered")
# Warn if interpolated values are outside data bounds, will make these the values
# at end of data range.
if np.max(minv) == xp.shape[axis]:
warnings.warn("Interpolation point out of data bounds encountered")
minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
if np.min(minv) == 0:
minv2[minv == 0] = 1
# Get indices for broadcasting arrays
above = broadcast_indices(xp, minv2, ndim, axis)
below = broadcast_indices(xp, minv2 - 1, ndim, axis)
if np.any(x_array < xp[below]):
warnings.warn("Interpolation point out of data bounds encountered")
# Create empty output list
ret = []
# Calculate interpolation for each variable
for var in variables:
var_interp = var[below] + ((x_array - xp[below]) / (xp[above] - xp[below])) * (
var[above] - var[below]
)
# Set points out of bounds to fill value.
var_interp[minv == xp.shape[axis]] = fill_value
var_interp[x_array < xp[below]] = fill_value
# Check for input points in decreasing order and return output to match.
if x[0] > x[-1]:
var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
# Output to list
ret.append(var_interp)
if len(ret) == 1:
return ret[0]
else:
return ret
|
https://github.com/Unidata/MetPy/issues/769
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-5-859d6855c05e> in <module>()
----> 1 isent_anal = mcalc.isentropic_interpolation(isentlevs,lev,tmp,spech,uwnd,vwnd,hgt,tmpk_out=True)
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/metpy/calc/thermo.pyc in isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs)
1185
1186 # calculate constants for the interpolation
-> 1187 a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
1188 b = tmpk.m[above] - a * log_p[above]
1189
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/numpy/ma/core.pyc in __getitem__(self, indx)
3195 # mask of being reshaped if it hasn't been set up properly yet
3196 # So it's easier to stick to the current version
-> 3197 dout = self.data[indx]
3198 _mask = self._mask
3199
IndexError: index 29 is out of bounds for axis 0 with size 29
---------------------------------------------------------------------------
|
IndexError
|
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop("tmpk_out", False)
max_iters = kwargs.pop("max_iters", 50)
eps = kwargs.pop("eps", 1e-6)
axis = kwargs.pop("axis", 0)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to("hPa")
temperature = temperature.to("kelvin")
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
theta_levels = np.asanyarray(theta_levels.to("kelvin")).reshape(-1)
isentlevels = theta_levels[np.argsort(theta_levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = kappa.m_as("dimensionless")
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError("Input theta level out of data bounds")
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = P0**ka
# index values for each point for the pressure level nearest to the desired theta level
minv = np.apply_along_axis(np.searchsorted, axis, pres_theta.m, theta_levels)
# Filter out points where we get indices above the top; theta below the bottom will be
# masked by comparing to the max pressure later.
good = minv < pres_theta.shape[axis]
minv[~good] = 0
# Create index values for broadcasting arrays
above = broadcast_indices(tmpk, minv, ndim, axis)
below = broadcast_indices(tmpk, minv - 1, ndim, axis)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(
_isen_iter,
isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps,
maxiter=max_iters,
)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interp(
isentlevels, pres_theta.m, *(arr[sorter] for arr in args), axis=axis
)
if len(args) > 1:
ret.extend(others)
else:
ret.append(others)
return ret
|
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
theta_levels : array
One-dimensional array of desired theta surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Other Parameters
----------------
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
tmpk_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Change when Python 2.7 no longer supported
# Pull out keyword arguments
tmpk_out = kwargs.pop("tmpk_out", False)
max_iters = kwargs.pop("max_iters", 50)
eps = kwargs.pop("eps", 1e-6)
axis = kwargs.pop("axis", 0)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to("hPa")
temperature = temperature.to("kelvin")
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
theta_levels = np.asanyarray(theta_levels.to("kelvin")).reshape(-1)
isentlevels = theta_levels[np.argsort(theta_levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = kappa.m_as("dimensionless")
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(theta_levels):
raise ValueError("Input theta level out of data bounds")
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = P0**ka
# index values for each point for the pressure level nearest to the desired theta level
minv = np.apply_along_axis(np.searchsorted, axis, pres_theta.m, theta_levels)
# Create index values for broadcasting arrays
above = broadcast_indices(tmpk, minv, ndim, axis)
below = broadcast_indices(tmpk, minv - 1, ndim, axis)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
first_guess = 0.5 * (log_p[above] + log_p[below])
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(
_isen_iter,
first_guess,
args=(isentlevs_nd, ka, a, b, pok.m),
xtol=eps,
maxiter=max_iters,
)
# get back pressure and assign nan for values with pressure greater than 1000 hPa
isentprs = np.exp(log_p_solved)
isentprs[isentprs > np.max(pressure.m)] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if tmpk_out = true, calculate temperature and output as last item in list
if tmpk_out:
ret.append((isentlevs_nd / ((P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interp(
isentlevels, pres_theta.m, *(arr[sorter] for arr in args), axis=axis
)
if len(args) > 1:
ret.extend(others)
else:
ret.append(others)
return ret
|
https://github.com/Unidata/MetPy/issues/769
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-5-859d6855c05e> in <module>()
----> 1 isent_anal = mcalc.isentropic_interpolation(isentlevs,lev,tmp,spech,uwnd,vwnd,hgt,tmpk_out=True)
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/metpy/calc/thermo.pyc in isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs)
1185
1186 # calculate constants for the interpolation
-> 1187 a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
1188 b = tmpk.m[above] - a * log_p[above]
1189
/home/vgensini/.conda/envs/met421/lib/python2.7/site-packages/numpy/ma/core.pyc in __getitem__(self, indx)
3195 # mask of being reshaped if it hasn't been set up properly yet
3196 # So it's easier to stick to the current version
-> 3197 dout = self.data[indx]
3198 _mask = self._mask
3199
IndexError: index 29 is out of bounds for axis 0 with size 29
---------------------------------------------------------------------------
|
IndexError
|
def lcl(pressure, temperature, dewpt, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpt`,
and `pressure`.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`(pint.Quantity, pint.Quantity)`
The LCL pressure and temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dew point from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
td = dewpoint(vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1.0 / kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)
fp = so.fixed_point(
_lcl_iter,
pressure.m,
args=(pressure.m, w, temperature),
xtol=eps,
maxiter=max_iters,
)
lcl_p = fp * pressure.units
return lcl_p, dewpoint(vapor_pressure(lcl_p, w))
|
def lcl(pressure, temperature, dewpt, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpt`,
and `pressure`.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`(pint.Quantity, pint.Quantity)`
The LCL pressure and temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dew point from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
"""
def _lcl_iter(p, p0, w, t):
td = dewpoint(vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1.0 / kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)
fp = so.fixed_point(
_lcl_iter,
pressure.m,
args=(pressure.m, w, temperature),
xtol=eps,
maxiter=max_iters,
)
lcl_p = units.Quantity(fp, pressure.units)
return lcl_p, dewpoint(vapor_pressure(lcl_p, w))
|
https://github.com/Unidata/MetPy/issues/619
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/quantity.py in _repr_html_(self)
213 # IPython related code
214 def _repr_html_(self):
--> 215 return self.__format__('H')
216
217 def _repr_latex_(self):
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/quantity.py in __format__(self, spec)
177 # this is required to have the magnitude and unit in the same line
178 allf = r'\[{0} {1}\]'
--> 179 parts = ndarray_to_latex_parts(obj.magnitude, mspec)
180
181 if len(parts) > 1:
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/formatting.py in ndarray_to_latex_parts(ndarr, fmtfun, dim)
304 ret += [header % elno + ' = ' + matrix_to_latex(el, fmtfun)]
305 else:
--> 306 for elno, el in enumerate(ndarr):
307 ret += ndarray_to_latex_parts(el, fmtfun, dim + (elno, ))
308
TypeError: iteration over a 0-d array
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/quantity.py in _repr_latex_(self)
216
217 def _repr_latex_(self):
--> 218 return "$" + self.__format__('L') + "$"
219
220 @property
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/quantity.py in __format__(self, spec)
173 if isinstance(self.magnitude, ndarray):
174 if 'L' in spec:
--> 175 mstr = ndarray_to_latex(obj.magnitude, mspec)
176 elif 'H' in spec:
177 # this is required to have the magnitude and unit in the same line
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/formatting.py in ndarray_to_latex(ndarr, fmtfun, dim)
311
312 def ndarray_to_latex(ndarr, fmtfun=lambda x: format(x, '.2f'), dim=()):
--> 313 return '\n'.join(ndarray_to_latex_parts(ndarr, fmtfun, dim))
~/miniconda3/envs/metpydev/lib/python3.6/site-packages/pint/formatting.py in ndarray_to_latex_parts(ndarr, fmtfun, dim)
304 ret += [header % elno + ' = ' + matrix_to_latex(el, fmtfun)]
305 else:
--> 306 for elno, el in enumerate(ndarr):
307 ret += ndarray_to_latex_parts(el, fmtfun, dim + (elno, ))
308
TypeError: iteration over a 0-d array
Out[27]:
<Quantity(929.7098864382868, 'hectopascal')>
|
TypeError
|
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
r"""Calculate the supercell composite parameter.
The supercell composite parameter is designed to identify
environments favorable for the development of supercells,
and is calculated using the formula developed by
[Thompson2004]_:
.. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
\frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
\frac{\text{Effective Shear}}{20 \text{m/s}}
The effective_shear term is set to zero below 10 m/s and
capped at 1 when effective_shear exceeds 20 m/s.
Parameters
----------
mucape : `pint.Quantity`
Most-unstable CAPE
effective_storm_helicity : `pint.Quantity`
Effective-layer storm-relative helicity
effective_shear : `pint.Quantity`
Effective bulk shear
Returns
-------
array-like
supercell composite
"""
effective_shear = np.clip(atleast_1d(effective_shear), None, 20 * units("m/s"))
effective_shear[effective_shear < 10 * units("m/s")] = 0 * units("m/s")
effective_shear = effective_shear / (20 * units("m/s"))
return (
(mucape / (1000 * units("J/kg")))
* (effective_storm_helicity / (50 * units("m^2/s^2")))
* effective_shear
).to("dimensionless")
|
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
r"""Calculate the supercell composite parameter.
The supercell composite parameter is designed to identify
environments favorable for the development of supercells,
and is calculated using the formula developed by
[Thompson2004]_:
SCP = (mucape / 1000 J/kg) * (effective_storm_helicity / 50 m^2/s^2) *
(effective_shear / 20 m/s)
The effective_shear term is set to zero below 10 m/s and
capped at 1 when effective_shear exceeds 20 m/s.
Parameters
----------
mucape : `pint.Quantity`
Most-unstable CAPE
effective_storm_helicity : `pint.Quantity`
Effective-layer storm-relative helicity
effective_shear : `pint.Quantity`
Effective bulk shear
Returns
-------
array-like
supercell composite
"""
effective_shear = np.clip(effective_shear, None, 20 * units("m/s"))
effective_shear[effective_shear < 10 * units("m/s")] = 0 * units("m/s")
effective_shear = effective_shear / (20 * units("m/s"))
return (
(mucape / (1000 * units("J/kg")))
* (effective_storm_helicity / (50 * units("m^2/s^2")))
* effective_shear
).to("dimensionless")
|
https://github.com/Unidata/MetPy/issues/608
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/usr/local/tools/anaconda3/lib/python3.6/site-packages/pint/quantity.py in __setitem__(self, key, value)
1306 '`obj.magnitude[%s] = %s`' % (key, value))
-> 1307 self._magnitude[key] = factor.magnitude
1308 else:
TypeError: 'numpy.float64' object does not support item assignment
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-15-13b7a3f081c1> in <module>()
----> 1 mpcalc.significant_tornado(sucape, sblcl, tsh1km, bs6km)
/usr/local/tools/anaconda3/lib/python3.6/site-packages/metpy/calc/indices.py in significant_tornado(sbcape, sblcl, storm_helicity_1km, shear_6km)
283 sblcl = (2000. * units('meter') - sblcl) / (1000. * units('meter'))
284 shear_6km = np.clip(shear_6km, None, 30 * units('m/s'))
--> 285 shear_6km[shear_6km < 12.5 * units('m/s')] = 0 * units('m/s')
286 shear_6km = shear_6km / (20 * units('m/s'))
287
/usr/local/tools/anaconda3/lib/python3.6/site-packages/pint/quantity.py in __setitem__(self, key, value)
1311 except TypeError:
1312 raise TypeError("Neither Quantity object nor its magnitude ({0})"
-> 1313 "supports indexing".format(self._magnitude))
1314
1315 def tolist(self):
TypeError: Neither Quantity object nor its magnitude (22.927608143881393)supports indexing
|
TypeError
|
def significant_tornado(
sbcape, surface_based_lcl_height, storm_helicity_1km, shear_6km
):
r"""Calculate the significant tornado parameter (fixed layer).
The significant tornado parameter is designed to identify
environments favorable for the production of significant
tornadoes contingent upon the development of supercells.
It's calculated according to the formula used on the SPC
mesoanalysis page, updated in [Thompson2004]_:
.. math:: \text{SIGTOR} = \frac{\text{SBCAPE}}{1500 \text{J/kg}} * \frac{(2000 \text{m} -
\text{LCL}_\text{SB})}{1000 \text{m}} *
\frac{SRH_{\text{1km}}}{150 \text{m}^\text{s}/\text{s}^2} *
\frac{\text{Shear}_\text{6km}}{20 \text{m/s}}
The lcl height is set to zero when the lcl is above 2000m and
capped at 1 when below 1000m, and the shr6 term is set to 0
when shr6 is below 12.5 m/s and maxed out at 1.5 when shr6
exceeds 30 m/s.
Parameters
----------
sbcape : `pint.Quantity`
Surface-based CAPE
surface_based_lcl_height : `pint.Quantity`
Surface-based lifted condensation level
storm_helicity_1km : `pint.Quantity`
Surface-1km storm-relative helicity
shear_6km : `pint.Quantity`
Surface-6km bulk shear
Returns
-------
array-like
significant tornado parameter
"""
surface_based_lcl_height = np.clip(
atleast_1d(surface_based_lcl_height),
1000 * units("meter"),
2000 * units("meter"),
)
surface_based_lcl_height[surface_based_lcl_height > 2000 * units("meter")] = (
0 * units("meter")
)
surface_based_lcl_height = (2000.0 * units("meter") - surface_based_lcl_height) / (
1000.0 * units("meter")
)
shear_6km = np.clip(atleast_1d(shear_6km), None, 30 * units("m/s"))
shear_6km[shear_6km < 12.5 * units("m/s")] = 0 * units("m/s")
shear_6km /= 20 * units("m/s")
return (
(sbcape / (1500.0 * units("J/kg")))
* surface_based_lcl_height
* (storm_helicity_1km / (150.0 * units("m^2/s^2")))
* shear_6km
)
|
def significant_tornado(sbcape, sblcl, storm_helicity_1km, shear_6km):
r"""Calculate the significant tornado parameter (fixed layer).
The significant tornado parameter is designed to identify
environments favorable for the production of significant
tornadoes contingent upon the development of supercells.
It's calculated according to the formula used on the SPC
mesoanalysis page, updated in [Thompson2004]_:
sigtor = (sbcape / 1500 J/kg) * ((2000 m - sblcl) / 1000 m) *
(storm_helicity_1km / 150 m^s/s^2) * (shear_6km6 / 20 m/s)
The sblcl term is set to zero when the lcl is above 2000m and
capped at 1 when below 1000m, and the shr6 term is set to 0
when shr6 is below 12.5 m/s and maxed out at 1.5 when shr6
exceeds 30 m/s.
Parameters
----------
sbcape : `pint.Quantity`
Surface-based CAPE
sblcl : `pint.Quantity`
Surface-based lifted condensation level
storm_helicity_1km : `pint.Quantity`
Surface-1km storm-relative helicity
shear_6km : `pint.Quantity`
Surface-6km bulk shear
Returns
-------
array-like
significant tornado parameter
"""
sblcl = np.clip(sblcl, 1000 * units("meter"), 2000 * units("meter"))
sblcl[sblcl > 2000 * units("meter")] = 0 * units("meter")
sblcl = (2000.0 * units("meter") - sblcl) / (1000.0 * units("meter"))
shear_6km = np.clip(shear_6km, None, 30 * units("m/s"))
shear_6km[shear_6km < 12.5 * units("m/s")] = 0 * units("m/s")
shear_6km = shear_6km / (20 * units("m/s"))
return (
(sbcape / (1500.0 * units("J/kg")))
* sblcl
* (storm_helicity_1km / (150.0 * units("m^2/s^2")))
* shear_6km
)
|
https://github.com/Unidata/MetPy/issues/608
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/usr/local/tools/anaconda3/lib/python3.6/site-packages/pint/quantity.py in __setitem__(self, key, value)
1306 '`obj.magnitude[%s] = %s`' % (key, value))
-> 1307 self._magnitude[key] = factor.magnitude
1308 else:
TypeError: 'numpy.float64' object does not support item assignment
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-15-13b7a3f081c1> in <module>()
----> 1 mpcalc.significant_tornado(sucape, sblcl, tsh1km, bs6km)
/usr/local/tools/anaconda3/lib/python3.6/site-packages/metpy/calc/indices.py in significant_tornado(sbcape, sblcl, storm_helicity_1km, shear_6km)
283 sblcl = (2000. * units('meter') - sblcl) / (1000. * units('meter'))
284 shear_6km = np.clip(shear_6km, None, 30 * units('m/s'))
--> 285 shear_6km[shear_6km < 12.5 * units('m/s')] = 0 * units('m/s')
286 shear_6km = shear_6km / (20 * units('m/s'))
287
/usr/local/tools/anaconda3/lib/python3.6/site-packages/pint/quantity.py in __setitem__(self, key, value)
1311 except TypeError:
1312 raise TypeError("Neither Quantity object nor its magnitude ({0})"
-> 1313 "supports indexing".format(self._magnitude))
1314
1315 def tolist(self):
TypeError: Neither Quantity object nor its magnitude (22.927608143881393)supports indexing
|
TypeError
|
def storm_relative_helicity(
u,
v,
heights,
depth,
bottom=0 * units.m,
storm_u=0 * units("m/s"),
storm_v=0 * units("m/s"),
):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
"""
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = (
storm_relative_u[1:] * storm_relative_v[:-1]
- storm_relative_u[:-1] * storm_relative_v[1:]
)
positive_srh = int_layers[int_layers.magnitude > 0.0].sum()
negative_srh = int_layers[int_layers.magnitude < 0.0].sum()
return (
positive_srh.to("meter ** 2 / second ** 2"),
negative_srh.to("meter ** 2 / second ** 2"),
(positive_srh + negative_srh).to("meter ** 2 / second ** 2"),
)
|
def storm_relative_helicity(
u,
v,
p,
hgt,
top,
bottom=0 * units("meter"),
storm_u=0 * units("m/s"),
storm_v=0 * units("m/s"),
):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Needs u and v wind components, heights and pressures,
and top and bottom of SRH layer. An optional storm
motion vector can be specified. SRH is calculated using the
equation specified on p. 230-231 in the Markowski and Richardson
meso textbook [Markowski2010].
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
The u components of winds, same length as hgts
v : array-like
The u components of winds, same length as hgts
p : array-like
Pressure in hPa, same length as hgts
hgt : array-like
The heights associatd with the data, provided in meters above mean
sea level and converted into meters AGL.
top : number
The height of the top of the desired layer for SRH.
bottom : number
The height at the bottom of the SRH layer. Default is sfc (None).
storm_u : number
u component of storm motion
storm_v : number
v component of storm motion
Returns
-------
number
p_srh : positive storm-relative helicity
number
n_srh : negative storm-relative helicity
number
t_srh : total storm-relative helicity
"""
# Converting to m/s to make sure output is in m^2/s^2
u = u.to("meters/second")
v = v.to("meters/second")
storm_u = storm_u.to("meters/second")
storm_v = storm_v.to("meters/second")
w_int = get_layer(p, u, v, heights=hgt, bottom=bottom, depth=top - bottom)
sru = w_int[1] - storm_u
srv = w_int[2] - storm_v
int_layers = sru[1:] * srv[:-1] - sru[:-1] * srv[1:]
p_srh = int_layers[int_layers.magnitude > 0.0].sum()
n_srh = int_layers[int_layers.magnitude < 0.0].sum()
t_srh = p_srh + n_srh
return p_srh, n_srh, t_srh
|
https://github.com/Unidata/MetPy/issues/576
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-b6078272f411> in <module>()
11 hgt = dataset['height'].values * units(dataset.units['height'])
12
---> 13 sreh = mpcalc.storm_relative_helicity(u, v, p, hgt.to('meter'), 6000 * units('meter'))
C:\Users\bradi\Anaconda3\lib\site-packages\metpy\units.py in wrapper(*args, **kwargs)
206 ' x = x * units.meter / units.second')
207 raise ValueError(msg)
--> 208 return func(*args, **kwargs)
209
210 return wrapper
C:\Users\bradi\Anaconda3\lib\site-packages\metpy\calc\kinematics.py in storm_relative_helicity(u, v, p, hgt, top, bottom, storm_u, storm_v)
596 storm_v = storm_v.to('meters/second')
597
--> 598 w_int = get_layer(p, u, v, heights=hgt, bottom=bottom, depth=top - bottom)
599
600 sru = w_int[1] - storm_u
C:\Users\bradi\Anaconda3\lib\site-packages\metpy\units.py in wrapper(*args, **kwargs)
206 ' x = x * units.meter / units.second')
207 raise ValueError(msg)
--> 208 return func(*args, **kwargs)
209
210 return wrapper
C:\Users\bradi\Anaconda3\lib\site-packages\metpy\calc\tools.py in get_layer(pressure, *args, **kwargs)
448 bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
449 heights=heights,
--> 450 interpolate=interpolate)
451
452 # Calculate the top if whatever units depth is in
C:\Users\bradi\Anaconda3\lib\site-packages\metpy\calc\tools.py in _get_bound_pressure_height(pressure, bound, heights, interpolate)
390 if heights is not None:
391 if (bound_height > np.max(heights)) or (bound_height < np.min(heights)):
--> 392 raise ValueError('Specified bound is outside height range.')
393
394 return bound_pressure, bound_height
ValueError: Specified bound is outside height range.
|
ValueError
|
async def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
await asyncio.wrap_future(self.pod_reflector.first_load_future)
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data["status"]["phase"] == "Pending":
return None
ctr_stat = data["status"].get("containerStatuses")
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c["name"] == "notebook":
if "terminated" in c["state"]:
# call self.stop to delete the pod
if self.delete_stopped_pods:
await self.stop(now=True)
return c["state"]["terminated"]["exitCode"]
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
|
async def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
await self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data["status"]["phase"] == "Pending":
return None
ctr_stat = data["status"].get("containerStatuses")
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c["name"] == "notebook":
if "terminated" in c["state"]:
# call self.stop to delete the pod
if self.delete_stopped_pods:
await self.stop(now=True)
return c["state"]["terminated"]["exitCode"]
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
|
https://github.com/jupyterhub/kubespawner/issues/466
|
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/jupyterhub/app.py", line 2032, in check_spawner
status = await spawner.poll()
File "/usr/local/lib/python3.8/dist-packages/kubespawner/spawner.py", line 1610, in poll
await self.pod_reflector.first_load_future
TypeError: object Future can't be used in 'await' expression
|
TypeError
|
def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data["status"]["phase"] == "Pending":
return None
ctr_stat = data["status"].get("containerStatuses")
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c["name"] == "notebook":
if "terminated" in c["state"]:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c["state"]["terminated"]["exitCode"]
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
|
def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data["status"]["phase"] == "Pending":
return None
ctr_stat = data["status"]["containerStatuses"]
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c["name"] == "notebook":
if "terminated" in c["state"]:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c["state"]["terminated"]["exitCode"]
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1
|
https://github.com/jupyterhub/kubespawner/issues/440
|
Sep 29 15:12:00hub-6788d79c4-mct9jhubERRORERROR 2020-09-29T20:12:00.742Z [JupyterHub user:645] Unhandled error starting 5f5118929d1395001a6aa4df's server: pod/jupyter-5f5118929d1395001a6aa4df did not start in 300 seconds!
Sep 29 15:12:00hub-6788d79c4-mct9jhubERRORERROR 2020-09-29T20:12:00.778Z [JupyterHub user:657] Failed to cleanup 5f5118929d1395001a6aa4df's server that failed to start
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/user.py", line 651, in spawn
await self.stop(spawner.name)
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/user.py", line 748, in stop
status = await spawner.poll()
File "/usr/local/lib/python3.6/dist-packages/kubespawner/spawner.py", line 1572, in poll
ctr_stat = data["status"]["containerStatuses"]
KeyError: 'containerStatuses'
Sep 29 15:12:00hub-6788d79c4-mct9jhubERRORERROR 2020-09-29T20:12:00.779Z [JupyterHub gen:599] Exception in Future <Task finished coro=<BaseHandler.spawn_single_user.<locals>.finish_user_spawn() done, defined at /usr/local/lib/python3.6/dist-packages/jupyterhub/handlers/base.py:845> exception=TimeoutError('pod/jupyter-5f5118929d1395001a6aa4df did not start in 300 seconds!',)> after timeout
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tornado/gen.py", line 593, in error_callback
future.result()
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/handlers/base.py", line 852, in finish_user_spawn
await spawn_future
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/user.py", line 661, in spawn
raise e
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/user.py", line 560, in spawn
url = await gen.with_timeout(timedelta(seconds=spawner.start_timeout), f)
File "/usr/local/lib/python3.6/dist-packages/kubespawner/spawner.py", line 1854, in _start
timeout=self.start_timeout,
File "/usr/local/lib/python3.6/dist-packages/jupyterhub/utils.py", line 177, in exponential_backoff
raise TimeoutError(fail_message)
TimeoutError: pod/jupyter-5f5118929d1395001a6aa4df did not start in 300 seconds!
|
KeyError
|
def update_info(self, data, params=None, headers=None, **kwargs):
"""Update information about this object.
Send a PUT to the object's base endpoint to modify the provided
attributes.
:param data:
The updated information about this object.
Must be JSON serializable.
Update the object attributes in data.keys(). The semantics of the
values depends on the the type and attributes of the object being
updated. For details on particular semantics, refer to the Box
developer API documentation <https://developer.box.com/>.
:type data:
`dict`
:param params:
(optional) Query string parameters for the request.
:type params:
`dict` or None
:param headers:
(optional) Extra HTTP headers for the request.
:type headers:
`dict` or None
:param kwargs:
Optional arguments that ``put`` takes.
:return:
The updated object.
Return a new object of the same type, without modifying the
original object passed as self.
Construct the new object with all the default attributes that are
returned from the endpoint.
:rtype:
:class:`BaseObject`
"""
# pylint:disable=no-else-return
url = self.get_url()
box_response = self._session.put(
url, data=json.dumps(data), params=params, headers=headers, **kwargs
)
if "expect_json_response" in kwargs and not kwargs["expect_json_response"]:
return box_response.ok
else:
return self.translator.translate(
session=self._session,
response_object=box_response.json(),
)
|
def update_info(self, data, params=None, headers=None, **kwargs):
"""Update information about this object.
Send a PUT to the object's base endpoint to modify the provided
attributes.
:param data:
The updated information about this object.
Must be JSON serializable.
Update the object attributes in data.keys(). The semantics of the
values depends on the the type and attributes of the object being
updated. For details on particular semantics, refer to the Box
developer API documentation <https://developer.box.com/>.
:type data:
`dict`
:param params:
(optional) Query string parameters for the request.
:type params:
`dict` or None
:param headers:
(optional) Extra HTTP headers for the request.
:type headers:
`dict` or None
:param kwargs:
Optional arguments that ``put`` takes.
:return:
The updated object.
Return a new object of the same type, without modifying the
original object passed as self.
Construct the new object with all the default attributes that are
returned from the endpoint.
:rtype:
:class:`BaseObject`
"""
url = self.get_url()
box_response = self._session.put(
url, data=json.dumps(data), params=params, headers=headers, **kwargs
)
return self.translator.translate(
session=self._session,
response_object=box_response.json(),
)
|
https://github.com/box/box-python-sdk/issues/528
|
Adding collab
Updated collab
Traceback (most recent call last):
File "./box_reorg_test.py", line 43, in <module>
main()
File "./box_reorg_test.py", line 33, in main
_ = src_collab.update_info(CollaborationRole.OWNER)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/util/api_call_decorator.py", line 71, in call
return method(*args, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/object/collaboration.py", line 61, in update_info
return super(Collaboration, self).update_info(data=data)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/util/api_call_decorator.py", line 71, in call
return method(*args, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/object/base_object.py", line 127, in update_info
box_response = self._session.put(url, data=json.dumps(data), params=params, headers=headers, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 121, in put
return self.request('PUT', url, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 157, in request
response = self._prepare_and_send_request(method, url, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 400, in _prepare_and_send_request
self._raise_on_unsuccessful_request(network_response, request)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 332, in _raise_on_unsuccessful_request
network_response=network_response,
boxsdk.exception.BoxAPIException: Message: Non-json response received, while expecting json response.
Status: 204
Code: None
Request ID: None
Headers: {'Date': 'Mon, 27 Jul 2020 23:16:54 GMT', 'Connection': 'keep-alive', 'Strict-Transport-Security': 'max-age=31536000', 'Cache-Control': 'no-cache, no-store', 'BOX-REQUEST-ID': '065305bafe80ff3870567b8c931b5d5aa'}
URL: https://api.box.com/2.0/collaborations/27688108163
Method: PUT
Context Info: None
|
boxsdk.exception.BoxAPIException
|
def update_info(self, role=None, status=None):
"""Edit an existing collaboration on Box
:param role:
The new role for this collaboration or None to leave unchanged
:type role:
:class:`CollaborationRole`
:param status:
The new status for this collaboration or None to leave unchanged. A pending collaboration can be set to
accepted or rejected if permissions allow it.
:type status:
:class:`CollaborationStatus`
:returns:
Whether or not the edit was successful.
:rtype:
`bool`
:raises:
:class:`BoxAPIException` if current user doesn't have permissions to edit the collaboration.
"""
# pylint:disable=arguments-differ,no-else-return
data = {}
if role:
data["role"] = role
if status:
data["status"] = status
if role == CollaborationRole.OWNER:
return super(Collaboration, self).update_info(
data=data, expect_json_response=False
)
else:
return super(Collaboration, self).update_info(data=data)
|
def update_info(self, role=None, status=None):
"""Edit an existing collaboration on Box
:param role:
The new role for this collaboration or None to leave unchanged
:type role:
:class:`CollaborationRole`
:param status:
The new status for this collaboration or None to leave unchanged. A pending collaboration can be set to
accepted or rejected if permissions allow it.
:type status:
:class:`CollaborationStatus`
:returns:
Whether or not the edit was successful.
:rtype:
`bool`
:raises:
:class:`BoxAPIException` if current user doesn't have permissions to edit the collaboration.
"""
# pylint:disable=arguments-differ
data = {}
if role:
data["role"] = role
if status:
data["status"] = status
return super(Collaboration, self).update_info(data=data)
|
https://github.com/box/box-python-sdk/issues/528
|
Adding collab
Updated collab
Traceback (most recent call last):
File "./box_reorg_test.py", line 43, in <module>
main()
File "./box_reorg_test.py", line 33, in main
_ = src_collab.update_info(CollaborationRole.OWNER)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/util/api_call_decorator.py", line 71, in call
return method(*args, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/object/collaboration.py", line 61, in update_info
return super(Collaboration, self).update_info(data=data)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/util/api_call_decorator.py", line 71, in call
return method(*args, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/object/base_object.py", line 127, in update_info
box_response = self._session.put(url, data=json.dumps(data), params=params, headers=headers, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 121, in put
return self.request('PUT', url, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 157, in request
response = self._prepare_and_send_request(method, url, **kwargs)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 400, in _prepare_and_send_request
self._raise_on_unsuccessful_request(network_response, request)
File "/Users/my_username/workspace/box-scripts/venv/lib/python3.7/site-packages/boxsdk/session/session.py", line 332, in _raise_on_unsuccessful_request
network_response=network_response,
boxsdk.exception.BoxAPIException: Message: Non-json response received, while expecting json response.
Status: 204
Code: None
Request ID: None
Headers: {'Date': 'Mon, 27 Jul 2020 23:16:54 GMT', 'Connection': 'keep-alive', 'Strict-Transport-Security': 'max-age=31536000', 'Cache-Control': 'no-cache, no-store', 'BOX-REQUEST-ID': '065305bafe80ff3870567b8c931b5d5aa'}
URL: https://api.box.com/2.0/collaborations/27688108163
Method: PUT
Context Info: None
|
boxsdk.exception.BoxAPIException
|
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print("Warning: means will be discarded for subsequent trials")
meanss = []
for trial in range(self._repeats):
if trace:
print("k-means trial", trial)
if not self._means or trial > 1:
self._means = self._rng.sample(list(vectors), self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
# effect the distance comparison)
for means in meanss:
means.sort(key=sum)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference is None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
|
def cluster_vectorspace(self, vectors, trace=False):
if self._means and self._repeats > 1:
print("Warning: means will be discarded for subsequent trials")
meanss = []
for trial in range(self._repeats):
if trace:
print("k-means trial", trial)
if not self._means or trial > 1:
self._means = self._rng.sample(vectors, self._num_means)
self._cluster_vectorspace(vectors, trace)
meanss.append(self._means)
if len(meanss) > 1:
# sort the means first (so that different cluster numbering won't
# effect the distance comparison)
for means in meanss:
means.sort(key=sum)
# find the set of means that's minimally different from the others
min_difference = min_means = None
for i in range(len(meanss)):
d = 0
for j in range(len(meanss)):
if i != j:
d += self._sum_distances(meanss[i], meanss[j])
if min_difference is None or d < min_difference:
min_difference, min_means = d, meanss[i]
# use the best means
self._means = min_means
|
https://github.com/nltk/nltk/issues/681
|
Traceback (most recent call last):
...
File "D:\SourceCode\voc\nltk\cluster\util.py", line 65, in cluster
self.cluster_vectorspace(vectors, trace)
File "D:\SourceCode\voc\nltk\cluster\kmeans.py", line 84, in cluster_vectorspace
self._means = self._rng.sample(vectors, self._num_means)
File "C:\Python32-64\lib\random.py", line 299, in sample
raise TypeError("Population must be a sequence or Set. For dicts, use list(d).")
TypeError: Population must be a sequence or Set. For dicts, use list(d).
|
TypeError
|
def __init__(self, tree, sentence=None, highlight=()):
if sentence is None:
leaves = tree.leaves()
if (
leaves
and not any(len(a) == 0 for a in tree.subtrees())
and all(isinstance(a, int) for a in leaves)
):
sentence = [str(a) for a in leaves]
else:
# this deals with empty nodes (frontier non-terminals)
# and multiple/mixed terminals under non-terminals.
tree = tree.copy(True)
sentence = []
for a in tree.subtrees():
if len(a) == 0:
a.append(len(sentence))
sentence.append(None)
elif any(not isinstance(b, Tree) for b in a):
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
if type(b) == tuple:
b = "/".join(b)
sentence.append("%s" % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight
)
|
def __init__(self, tree, sentence=None, highlight=()):
if sentence is None:
leaves = tree.leaves()
if (
leaves
and not any(len(a) == 0 for a in tree.subtrees())
and all(isinstance(a, int) for a in leaves)
):
sentence = [str(a) for a in leaves]
else:
# this deals with empty nodes (frontier non-terminals)
# and multiple/mixed terminals under non-terminals.
tree = tree.copy(True)
sentence = []
for a in tree.subtrees():
if len(a) == 0:
a.append(len(sentence))
sentence.append(None)
elif any(not isinstance(b, Tree) for b in a):
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
sentence.append("%s" % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight
)
|
https://github.com/nltk/nltk/issues/2102
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-306ca23c9095> in <module>()
7 NPChunker = nltk.RegexpParser(pattern)
8 result = NPChunker.parse(sentence)
----> 9 result.pretty_print()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/nltk/tree.py in pretty_print(self, sentence, highlight, stream, **kwargs)
697 """
698 from nltk.treeprettyprinter import TreePrettyPrinter
--> 699 print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs),
700 file=stream)
701
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/nltk/treeprettyprinter.py in __init__(self, tree, sentence, highlight)
95 if not isinstance(b, Tree):
96 a[n] = len(sentence)
---> 97 sentence.append('%s' % b)
98 self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
99 tree, sentence, highlight)
TypeError: not all arguments converted during string formatting
|
TypeError
|
def __init__(self, tree, sentence=None, highlight=()):
if sentence is None:
leaves = tree.leaves()
if (
leaves
and not any(len(a) == 0 for a in tree.subtrees())
and all(isinstance(a, int) for a in leaves)
):
sentence = [str(a) for a in leaves]
else:
# this deals with empty nodes (frontier non-terminals)
# and multiple/mixed terminals under non-terminals.
tree = tree.copy(True)
sentence = []
for a in tree.subtrees():
if len(a) == 0:
a.append(len(sentence))
sentence.append(None)
elif any(not isinstance(b, Tree) for b in a):
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
if type(b) == tuple:
b = "/".join(b)
sentence.append("%s" % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight
)
|
def __init__(self, tree, sentence=None, highlight=()):
if sentence is None:
leaves = tree.leaves()
if (
leaves
and not any(len(a) == 0 for a in tree.subtrees())
and all(isinstance(a, int) for a in leaves)
):
sentence = [str(a) for a in leaves]
else:
# this deals with empty nodes (frontier non-terminals)
# and multiple/mixed terminals under non-terminals.
tree = tree.copy(True)
sentence = []
for a in tree.subtrees():
if len(a) == 0:
a.append(len(sentence))
sentence.append(None)
elif any(not isinstance(b, Tree) for b in a):
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
sentence.append("%s" % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight
)
|
https://github.com/nltk/nltk/issues/2102
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-306ca23c9095> in <module>()
7 NPChunker = nltk.RegexpParser(pattern)
8 result = NPChunker.parse(sentence)
----> 9 result.pretty_print()
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/nltk/tree.py in pretty_print(self, sentence, highlight, stream, **kwargs)
697 """
698 from nltk.treeprettyprinter import TreePrettyPrinter
--> 699 print(TreePrettyPrinter(self, sentence, highlight).text(**kwargs),
700 file=stream)
701
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/nltk/treeprettyprinter.py in __init__(self, tree, sentence, highlight)
95 if not isinstance(b, Tree):
96 a[n] = len(sentence)
---> 97 sentence.append('%s' % b)
98 self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
99 tree, sentence, highlight)
TypeError: not all arguments converted during string formatting
|
TypeError
|
def __init__(self, zipfile, entry=""):
"""
Create a new path pointer pointing at the specified entry
in the given zipfile.
:raise IOError: If the given zipfile does not exist, or if it
does not contain the specified entry.
"""
if isinstance(zipfile, string_types):
zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
# Normalize the entry string, it should be relative:
entry = normalize_resource_name(entry, True, "/").lstrip("/")
# Check that the entry exists:
if entry != ".":
try:
zipfile.getinfo(entry)
except Exception:
# Sometimes directories aren't explicitly listed in
# the zip file. So if `entry` is a directory name,
# then check if the zipfile contains any files that
# are under the given directory.
if entry.endswith("/") and [
n for n in zipfile.namelist() if n.startswith(entry)
]:
pass # zipfile contains a file in that directory.
else:
# Otherwise, complain.
raise IOError(
"Zipfile %r does not contain %r" % (zipfile.filename, entry)
)
self._zipfile = zipfile
self._entry = entry
|
def __init__(self, zipfile, entry=""):
"""
Create a new path pointer pointing at the specified entry
in the given zipfile.
:raise IOError: If the given zipfile does not exist, or if it
does not contain the specified entry.
"""
if isinstance(zipfile, string_types):
zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
# Normalize the entry string, it should be relative:
entry = normalize_resource_name(entry, True, "/").lstrip("/")
# Check that the entry exists:
if entry:
try:
zipfile.getinfo(entry)
except Exception:
# Sometimes directories aren't explicitly listed in
# the zip file. So if `entry` is a directory name,
# then check if the zipfile contains any files that
# are under the given directory.
if entry.endswith("/") and [
n for n in zipfile.namelist() if n.startswith(entry)
]:
pass # zipfile contains a file in that directory.
else:
# Otherwise, complain.
raise IOError(
"Zipfile %r does not contain %r" % (zipfile.filename, entry)
)
self._zipfile = zipfile
self._entry = entry
|
https://github.com/nltk/nltk/issues/1986
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\corpus\reader\twitter.py", line 74, in __init__
CorpusReader.__init__(self, root, fileids, encoding)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\corpus\reader\api.py", line 82, in __init__
root = ZipFilePathPointer(zipfile, zipentry)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\compat.py", line 221, in _decorator
return init_func(*args, **kwargs)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\data.py", line 506, in __init__
(zipfile.filename, entry))
OSError: Zipfile 'C:\\Projects\\mfd\\nlp\\twitter_samples.zip' does not contain '.'
|
OSError
|
def __init__(self, zipfile, entry=""):
"""
Create a new path pointer pointing at the specified entry
in the given zipfile.
:raise IOError: If the given zipfile does not exist, or if it
does not contain the specified entry.
"""
if isinstance(zipfile, string_types):
zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
# Check that the entry exists:
if entry:
# Normalize the entry string, it should be relative:
entry = normalize_resource_name(entry, True, "/").lstrip("/")
try:
zipfile.getinfo(entry)
except Exception:
# Sometimes directories aren't explicitly listed in
# the zip file. So if `entry` is a directory name,
# then check if the zipfile contains any files that
# are under the given directory.
if entry.endswith("/") and [
n for n in zipfile.namelist() if n.startswith(entry)
]:
pass # zipfile contains a file in that directory.
else:
# Otherwise, complain.
raise IOError(
"Zipfile %r does not contain %r" % (zipfile.filename, entry)
)
self._zipfile = zipfile
self._entry = entry
|
def __init__(self, zipfile, entry=""):
"""
Create a new path pointer pointing at the specified entry
in the given zipfile.
:raise IOError: If the given zipfile does not exist, or if it
does not contain the specified entry.
"""
if isinstance(zipfile, string_types):
zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile))
# Normalize the entry string, it should be relative:
entry = normalize_resource_name(entry, True, "/").lstrip("/")
# Check that the entry exists:
if entry != ".":
try:
zipfile.getinfo(entry)
except Exception:
# Sometimes directories aren't explicitly listed in
# the zip file. So if `entry` is a directory name,
# then check if the zipfile contains any files that
# are under the given directory.
if entry.endswith("/") and [
n for n in zipfile.namelist() if n.startswith(entry)
]:
pass # zipfile contains a file in that directory.
else:
# Otherwise, complain.
raise IOError(
"Zipfile %r does not contain %r" % (zipfile.filename, entry)
)
self._zipfile = zipfile
self._entry = entry
|
https://github.com/nltk/nltk/issues/1986
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\corpus\reader\twitter.py", line 74, in __init__
CorpusReader.__init__(self, root, fileids, encoding)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\corpus\reader\api.py", line 82, in __init__
root = ZipFilePathPointer(zipfile, zipentry)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\compat.py", line 221, in _decorator
return init_func(*args, **kwargs)
File "C:\Apps\Tools\python\conda3\envs\nlp\lib\site-packages\nltk\data.py", line 506, in __init__
(zipfile.filename, entry))
OSError: Zipfile 'C:\\Projects\\mfd\\nlp\\twitter_samples.zip' does not contain '.'
|
OSError
|
def __Suffix_Verb_Step2a(self, token):
for suffix in self.__suffix_verb_step2a:
if token.endswith(suffix) and len(token) > 3:
if suffix == "\u062a" and len(token) >= 4:
token = token[:-1]
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4:
token = token[:-1]
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5:
token = token[:-2] # past
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_present and len(token) > 5:
token = token[:-2] # present
self.suffix_verb_step2a_success = True
break
if suffix == "\u062a\u0645\u0627" and len(token) >= 6:
token = token[:-3]
self.suffix_verb_step2a_success = True
break
return token
|
def __Suffix_Verb_Step2a(self, token):
for suffix in self.__suffix_verb_step2a:
if token.endswith(suffix):
if suffix == "\u062a" and len(token) >= 4:
token = token[:-1]
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_4 and len(token) >= 4:
token = token[:-1]
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_past and len(token) >= 5:
token = token[:-2] # past
self.suffix_verb_step2a_success = True
break
if suffix in self.__conjugation_suffix_verb_present and len(token) > 5:
token = token[:-2] # present
self.suffix_verb_step2a_success = True
break
if suffix == "\u062a\u0645\u0627" and len(token) >= 6:
token = token[:-3]
self.suffix_verb_step2a_success = True
break
return token
|
https://github.com/nltk/nltk/issues/1852
|
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ pip freeze | grep nltk
nltk==3.2.5
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ ipython
Python 2.7.13 |Anaconda custom (x86_64)| (default, Dec 20 2016, 23:05:08)
Type "copyright", "credits" or "license" for more information.
IPython 5.3.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
In [1]: from nltk.stem.snowball import SnowballStemmer
In [2]: stemmer = SnowballStemmer('arabic')
In [3]: stemmer.stem(u'تسدد')
Out[3]: u'\u062a\u0633\u062f\u062f'
In [4]: stemmer.stem(u'من')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-ffa733106049> in <module>()
----> 1 stemmer.stem(u'من')
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in stem(self, word)
762 modified_word = self.__Suffix_Verb_Step2b(modified_word)
763 if not self.suffix_verb_step2b_success:
--> 764 modified_word = self.__Suffix_Verb_Step2a(modified_word)
765 if self.is_noun:
766 modified_word = self.__Suffix_Noun_Step2c2(modified_word)
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in __Suffix_Verb_Step2a(self, token)
533 break
534
--> 535 if suffix in self.__conjugation_suffix_verb_present and len(token) > 5:
536 token = token[:-2] # present
537 self.suffix_verb_step2a_success = True
AttributeError: 'ArabicStemmer' object has no attribute '_ArabicStemmer__conjugation_suffix_verb_present'
|
AttributeError
|
def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
# Pre_Normalization
modified_word = self.__normalize_pre(modified_word)
# Avoid stopwords
if modified_word in self.stopwords or len(modified_word) <= 2:
return modified_word
# Start stemming
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word)
# or next TODO: How to deal with or next instruction
else:
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
# or next ? todo : how to deal with or next
else:
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
modified_word = self.__Suffix_Noun_Step2b(modified_word)
modified_word = self.__Suffix_Noun_Step3(modified_word)
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word)
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word)
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word)
else:
if not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word)
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word
|
def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
modified_word = self.__normalize_pre(modified_word)
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word)
# or next
else:
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
# or next ? todo : how to deal with or next
else:
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
modified_word = self.__Suffix_Noun_Step2b(modified_word)
modified_word = self.__Suffix_Noun_Step3(modified_word)
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word)
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word)
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word)
else:
if not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word)
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word
|
https://github.com/nltk/nltk/issues/1852
|
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ pip freeze | grep nltk
nltk==3.2.5
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ ipython
Python 2.7.13 |Anaconda custom (x86_64)| (default, Dec 20 2016, 23:05:08)
Type "copyright", "credits" or "license" for more information.
IPython 5.3.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
In [1]: from nltk.stem.snowball import SnowballStemmer
In [2]: stemmer = SnowballStemmer('arabic')
In [3]: stemmer.stem(u'تسدد')
Out[3]: u'\u062a\u0633\u062f\u062f'
In [4]: stemmer.stem(u'من')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-ffa733106049> in <module>()
----> 1 stemmer.stem(u'من')
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in stem(self, word)
762 modified_word = self.__Suffix_Verb_Step2b(modified_word)
763 if not self.suffix_verb_step2b_success:
--> 764 modified_word = self.__Suffix_Verb_Step2a(modified_word)
765 if self.is_noun:
766 modified_word = self.__Suffix_Noun_Step2c2(modified_word)
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in __Suffix_Verb_Step2a(self, token)
533 break
534
--> 535 if suffix in self.__conjugation_suffix_verb_present and len(token) > 5:
536 token = token[:-2] # present
537 self.suffix_verb_step2a_success = True
AttributeError: 'ArabicStemmer' object has no attribute '_ArabicStemmer__conjugation_suffix_verb_present'
|
AttributeError
|
def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
# Pre_Normalization
modified_word = self.__normalize_pre(modified_word)
# Avoid stopwords
if modified_word in self.stopwords or len(modified_word) <= 2:
return modified_word
# Start stemming
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word)
# or next TODO: How to deal with or next instruction
else:
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
# or next ? todo : how to deal with or next
else:
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
modified_word = self.__Suffix_Noun_Step2b(modified_word)
modified_word = self.__Suffix_Noun_Step3(modified_word)
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word)
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word)
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word)
else:
if not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word)
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word
|
def stem(self, word):
"""
Stem an Arabic word and return the stemmed form.
:param word: string
:return: string
"""
# set initial values
self.is_verb = True
self.is_noun = True
self.is_defined = False
self.suffix_verb_step2a_success = False
self.suffix_verb_step2b_success = False
self.suffix_noun_step2c2_success = False
self.suffix_noun_step1a_success = False
self.suffix_noun_step2a_success = False
self.suffix_noun_step2b_success = False
self.suffixe_noun_step1b_success = False
self.prefix_step2a_success = False
self.prefix_step3a_noun_success = False
self.prefix_step3b_noun_success = False
modified_word = word
# guess type and properties
# checks1
self.__checks_1(modified_word)
# checks2
self.__checks_2(modified_word)
modified_word = self.__normalize_pre(modified_word)
if self.is_verb:
modified_word = self.__Suffix_Verb_Step1(modified_word)
if self.suffixes_verb_step1_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if not self.suffix_verb_step2a_success:
modified_word = self.__Suffix_Verb_Step2c(modified_word)
# or next
else:
modified_word = self.__Suffix_Verb_Step2b(modified_word)
if not self.suffix_verb_step2b_success:
modified_word = self.__Suffix_Verb_Step2a(modified_word)
if self.is_noun:
modified_word = self.__Suffix_Noun_Step2c2(modified_word)
if not self.suffix_noun_step2c2_success:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step1a(modified_word)
# if self.suffix_noun_step1a_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
# or next ? todo : how to deal with or next
else:
modified_word = self.__Suffix_Noun_Step1b(modified_word)
if self.suffixe_noun_step1b_success:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
if not self.suffix_noun_step2a_success:
modified_word = self.__Suffix_Noun_Step2b(modified_word)
if (
not self.suffix_noun_step2b_success
and not self.suffix_noun_step2a_success
):
modified_word = self.__Suffix_Noun_Step2c1(modified_word)
else:
if not self.is_defined:
modified_word = self.__Suffix_Noun_Step2a(modified_word)
modified_word = self.__Suffix_Noun_Step2b(modified_word)
modified_word = self.__Suffix_Noun_Step3(modified_word)
if not self.is_noun and self.is_verb:
modified_word = self.__Suffix_All_alef_maqsura(modified_word)
# prefixes
modified_word = self.__Prefix_Step1(modified_word)
modified_word = self.__Prefix_Step2a(modified_word)
if not self.prefix_step2a_success:
modified_word = self.__Prefix_Step2b(modified_word)
modified_word = self.__Prefix_Step3a_Noun(modified_word)
if not self.prefix_step3a_noun_success and self.is_noun:
modified_word = self.__Prefix_Step3b_Noun(modified_word)
else:
if not self.prefix_step3b_noun_success and self.is_verb:
modified_word = self.__Prefix_Step3_Verb(modified_word)
modified_word = self.__Prefix_Step4_Verb(modified_word)
# post normalization stemming
modified_word = self.__normalize_post(modified_word)
stemmed_word = modified_word
return stemmed_word
|
https://github.com/nltk/nltk/issues/1852
|
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ pip freeze | grep nltk
nltk==3.2.5
(anaconda2-4.4.0) richard-balmer-macbook:~ richardbalmer$ ipython
Python 2.7.13 |Anaconda custom (x86_64)| (default, Dec 20 2016, 23:05:08)
Type "copyright", "credits" or "license" for more information.
IPython 5.3.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
In [1]: from nltk.stem.snowball import SnowballStemmer
In [2]: stemmer = SnowballStemmer('arabic')
In [3]: stemmer.stem(u'تسدد')
Out[3]: u'\u062a\u0633\u062f\u062f'
In [4]: stemmer.stem(u'من')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-ffa733106049> in <module>()
----> 1 stemmer.stem(u'من')
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in stem(self, word)
762 modified_word = self.__Suffix_Verb_Step2b(modified_word)
763 if not self.suffix_verb_step2b_success:
--> 764 modified_word = self.__Suffix_Verb_Step2a(modified_word)
765 if self.is_noun:
766 modified_word = self.__Suffix_Noun_Step2c2(modified_word)
/Users/richardbalmer/.pyenv/versions/anaconda2-4.4.0/lib/python2.7/site-packages/nltk/stem/snowball.pyc in __Suffix_Verb_Step2a(self, token)
533 break
534
--> 535 if suffix in self.__conjugation_suffix_verb_present and len(token) > 5:
536 token = token[:-2] # present
537 self.suffix_verb_step2a_success = True
AttributeError: 'ArabicStemmer' object has no attribute '_ArabicStemmer__conjugation_suffix_verb_present'
|
AttributeError
|
def train(self, sentences, save_loc=None, nr_iter=5):
"""Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.
:param sentences: A list or iterator of sentences, where each sentence
is a list of (words, tags) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
"""
# We'd like to allow ``sentences`` to be either a list or an iterator,
# the latter being especially important for a large training dataset.
# Because ``self._make_tagdict(sentences)`` runs regardless, we make
# it populate ``self._sentences`` (a list) with all the sentences.
# This saves the overheard of just iterating through ``sentences`` to
# get the list by ``sentences = list(sentences)``.
self._sentences = list() # to be populated by self._make_tagdict...
self._make_tagdict(sentences)
self.model.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for sentence in self._sentences:
words, tags = zip(*sentence)
prev, prev2 = self.START
context = self.START + [self.normalize(w) for w in words] + self.END
for i, word in enumerate(words):
guess = self.tagdict.get(word)
if not guess:
feats = self._get_features(i, word, context, prev, prev2)
guess = self.model.predict(feats)
self.model.update(tags[i], guess, feats)
prev2 = prev
prev = guess
c += guess == tags[i]
n += 1
random.shuffle(self._sentences)
logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
# We don't need the training sentences anymore, and we don't want to
# waste space on them when we pickle the trained tagger.
self._sentences = None
self.model.average_weights()
# Pickle as a binary file
if save_loc is not None:
with open(save_loc, "wb") as fout:
# changed protocol from -1 to 2 to make pickling Python 2 compatible
pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2)
|
def train(self, sentences, save_loc=None, nr_iter=5):
"""Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.
:param sentences: A list of (words, tags) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
"""
self._make_tagdict(sentences)
self.model.classes = self.classes
for iter_ in range(nr_iter):
c = 0
n = 0
for sentence in sentences:
words = [word for word, tag in sentence]
tags = [tag for word, tag in sentence]
prev, prev2 = self.START
context = self.START + [self.normalize(w) for w in words] + self.END
for i, word in enumerate(words):
guess = self.tagdict.get(word)
if not guess:
feats = self._get_features(i, word, context, prev, prev2)
guess = self.model.predict(feats)
self.model.update(tags[i], guess, feats)
prev2 = prev
prev = guess
c += guess == tags[i]
n += 1
random.shuffle(sentences)
logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
self.model.average_weights()
# Pickle as a binary file
if save_loc is not None:
with open(save_loc, "wb") as fout:
# changed protocol from -1 to 2 to make pickling Python 2 compatible
pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2)
|
https://github.com/nltk/nltk/issues/1486
|
from nltk.tag import PerceptronTagger
from nltk.corpus import alpino as alp
training_corpus = alp.tagged_sents()
tagger = PerceptronTagger(load=False)
tagger.train(training_corpus)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Library/Python/2.7/site-packages/nltk/tag/perceptron.py", line 194, in train
random.shuffle(sentences)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/random.py", line 291, in shuffle
x[i], x[j] = x[j], x[i]
TypeError: 'StreamBackedCorpusView' object does not support item assignment
|
TypeError
|
def _make_tagdict(self, sentences):
"""
Make a tag dictionary for single-tag words.
:param sentences: A list of list of (word, tag) tuples.
"""
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
self._sentences.append(sentence)
for word, tag in sentence:
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
self.tagdict[word] = tag
|
def _make_tagdict(self, sentences):
"""
Make a tag dictionary for single-tag words.
:param sentences: A list of list of (word, tag) tuples.
"""
counts = defaultdict(lambda: defaultdict(int))
for sentence in sentences:
for word, tag in sentence:
counts[word][tag] += 1
self.classes.add(tag)
freq_thresh = 20
ambiguity_thresh = 0.97
for word, tag_freqs in counts.items():
tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
n = sum(tag_freqs.values())
# Don't add rare words to the tag dictionary
# Only add quite unambiguous words
if n >= freq_thresh and (mode / n) >= ambiguity_thresh:
self.tagdict[word] = tag
|
https://github.com/nltk/nltk/issues/1486
|
from nltk.tag import PerceptronTagger
from nltk.corpus import alpino as alp
training_corpus = alp.tagged_sents()
tagger = PerceptronTagger(load=False)
tagger.train(training_corpus)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Library/Python/2.7/site-packages/nltk/tag/perceptron.py", line 194, in train
random.shuffle(sentences)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/random.py", line 291, in shuffle
x[i], x[j] = x[j], x[i]
TypeError: 'StreamBackedCorpusView' object does not support item assignment
|
TypeError
|
def __init__(self, strings=None):
"""Builds a Trie object, which is built around a ``dict``
If ``strings`` is provided, it will add the ``strings``, which
consist of a ``list`` of ``strings``, to the Trie.
Otherwise, it'll construct an empty Trie.
:param strings: List of strings to insert into the trie
(Default is ``None``)
:type strings: list(str)
"""
super(Trie, self).__init__()
if strings:
for string in strings:
self.insert(string)
|
def __init__(self, strings=None):
"""Builds a Trie object, which is built around a ``defaultdict``
If ``strings`` is provided, it will add the ``strings``, which
consist of a ``list`` of ``strings``, to the Trie.
Otherwise, it'll construct an empty Trie.
:param strings: List of strings to insert into the trie
(Default is ``None``)
:type strings: list(str)
"""
defaultdict.__init__(self, Trie)
if strings:
for string in strings:
self.insert(string)
|
https://github.com/nltk/nltk/issues/1761
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-47-205693e12b16> in <module>()
1 with open('tokenizer.pkl', 'rb') as f:
----> 2 t = pickle.load(f)
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(file)
1382
1383 def load(file):
-> 1384 return Unpickler(file).load()
1385
1386 def loads(str):
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(self)
862 while 1:
863 key = read(1)
--> 864 dispatch[key](self)
865 except _Stop, stopinst:
866 return stopinst.value
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load_reduce(self)
1137 args = stack.pop()
1138 func = stack[-1]
-> 1139 value = func(*args)
1140 stack[-1] = value
1141 dispatch[REDUCE] = load_reduce
/Users/vinay/Sites/nltk_mwe/.env/lib/python2.7/site-packages/nltk/collections.pyc in __init__(self, strings)
611 defaultdict.__init__(self, Trie)
612 if strings:
--> 613 for string in strings:
614 self.insert(string)
615
TypeError: 'type' object is not iterable
|
TypeError
|
def insert(self, string):
"""Inserts ``string`` into the Trie
:param string: String to insert into the trie
:type string: str
:Example:
>>> from nltk.collections import Trie
>>> trie = Trie(["abc", "def"])
>>> expected = {'a': {'b': {'c': {True: None}}}, \
'd': {'e': {'f': {True: None}}}}
>>> trie == expected
True
"""
if len(string):
self[string[0]].insert(string[1:])
else:
# mark the string is complete
self[Trie.LEAF] = None
|
def insert(self, string):
"""Inserts ``string`` into the Trie
:param string: String to insert into the trie
:type string: str
:Example:
>>> from nltk.collections import Trie
>>> trie = Trie(["ab"])
>>> trie
defaultdict(<class 'nltk.collections.Trie'>, {'a': defaultdict(<class 'nltk.collections.Trie'>, {'b': defaultdict(<class 'nltk.collections.Trie'>, {True: None})})})
"""
if len(string):
self[string[0]].insert(string[1:])
else:
# mark the string is complete
self[Trie.LEAF] = None
|
https://github.com/nltk/nltk/issues/1761
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-47-205693e12b16> in <module>()
1 with open('tokenizer.pkl', 'rb') as f:
----> 2 t = pickle.load(f)
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(file)
1382
1383 def load(file):
-> 1384 return Unpickler(file).load()
1385
1386 def loads(str):
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(self)
862 while 1:
863 key = read(1)
--> 864 dispatch[key](self)
865 except _Stop, stopinst:
866 return stopinst.value
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load_reduce(self)
1137 args = stack.pop()
1138 func = stack[-1]
-> 1139 value = func(*args)
1140 stack[-1] = value
1141 dispatch[REDUCE] = load_reduce
/Users/vinay/Sites/nltk_mwe/.env/lib/python2.7/site-packages/nltk/collections.pyc in __init__(self, strings)
611 defaultdict.__init__(self, Trie)
612 if strings:
--> 613 for string in strings:
614 self.insert(string)
615
TypeError: 'type' object is not iterable
|
TypeError
|
def __missing__(self, key):
self[key] = Trie()
return self[key]
|
def __missing__(self, key):
if not self._default_factory and key not in self._keys:
raise KeyError()
return self._default_factory()
|
https://github.com/nltk/nltk/issues/1761
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-47-205693e12b16> in <module>()
1 with open('tokenizer.pkl', 'rb') as f:
----> 2 t = pickle.load(f)
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(file)
1382
1383 def load(file):
-> 1384 return Unpickler(file).load()
1385
1386 def loads(str):
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(self)
862 while 1:
863 key = read(1)
--> 864 dispatch[key](self)
865 except _Stop, stopinst:
866 return stopinst.value
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load_reduce(self)
1137 args = stack.pop()
1138 func = stack[-1]
-> 1139 value = func(*args)
1140 stack[-1] = value
1141 dispatch[REDUCE] = load_reduce
/Users/vinay/Sites/nltk_mwe/.env/lib/python2.7/site-packages/nltk/collections.pyc in __init__(self, strings)
611 defaultdict.__init__(self, Trie)
612 if strings:
--> 613 for string in strings:
614 self.insert(string)
615
TypeError: 'type' object is not iterable
|
TypeError
|
def add_mwe(self, mwe):
"""Add a multi-word expression to the lexicon (stored as a word trie)
We use ``util.Trie`` to represent the trie. Its form is a dict of dicts.
The key True marks the end of a valid MWE.
:param mwe: The multi-word expression we're adding into the word trie
:type mwe: tuple(str) or list(str)
:Example:
>>> tokenizer = MWETokenizer()
>>> tokenizer.add_mwe(('a', 'b'))
>>> tokenizer.add_mwe(('a', 'b', 'c'))
>>> tokenizer.add_mwe(('a', 'x'))
>>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}}
>>> tokenizer._mwes == expected
True
"""
self._mwes.insert(mwe)
|
def add_mwe(self, mwe):
"""Add a multi-word expression to the lexicon (stored as a word trie)
We use ``util.Trie`` to represent the trie. Its form is a dict of dicts.
The key True marks the end of a valid MWE.
:param mwe: The multi-word expression we're adding into the word trie
:type mwe: tuple(str) or list(str)
:Example:
>>> tokenizer = MWETokenizer()
>>> tokenizer.add_mwe(('a', 'b'))
>>> tokenizer.add_mwe(('a', 'b', 'c'))
>>> tokenizer.add_mwe(('a', 'x'))
>>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}}
>>> tokenizer._mwes.as_dict() == expected
True
"""
self._mwes.insert(mwe)
|
https://github.com/nltk/nltk/issues/1761
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-47-205693e12b16> in <module>()
1 with open('tokenizer.pkl', 'rb') as f:
----> 2 t = pickle.load(f)
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(file)
1382
1383 def load(file):
-> 1384 return Unpickler(file).load()
1385
1386 def loads(str):
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load(self)
862 while 1:
863 key = read(1)
--> 864 dispatch[key](self)
865 except _Stop, stopinst:
866 return stopinst.value
/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pickle.pyc in load_reduce(self)
1137 args = stack.pop()
1138 func = stack[-1]
-> 1139 value = func(*args)
1140 stack[-1] = value
1141 dispatch[REDUCE] = load_reduce
/Users/vinay/Sites/nltk_mwe/.env/lib/python2.7/site-packages/nltk/collections.pyc in __init__(self, strings)
611 defaultdict.__init__(self, Trie)
612 if strings:
--> 613 for string in strings:
614 self.insert(string)
615
TypeError: 'type' object is not iterable
|
TypeError
|
def has_numeric_only(self, text):
return bool(re.search(r"(.*)[\s]+(\#NUMERIC_ONLY\#)", text))
|
def has_numeric_only(self, text):
return bool(re.match(r"(.*)[\s]+(\#NUMERIC_ONLY\#)", text))
|
https://github.com/nltk/nltk/issues/1551
|
$ python -c 'from nltk.tokenize.moses import MosesTokenizer; m = MosesTokenizer(); m.penn_tokenize("this aint funny")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "nltk/tokenize/moses.py", line 299, in penn_tokenize
text = re.sub(regexp, subsitution, text)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 155, in sub
return _compile(pattern, flags).sub(repl, string, count)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 251, in _compile
raise error, v # invalid expression
sre_constants.error: unbalanced parenthesis
|
sre_constants.error
|
def handles_nonbreaking_prefixes(self, text):
# Splits the text into tokens to check for nonbreaking prefixes.
tokens = text.split()
num_tokens = len(tokens)
for i, token in enumerate(tokens):
# Checks if token ends with a fullstop.
token_ends_with_period = re.search(r"^(\S+)\.$", text)
if token_ends_with_period:
prefix = token_ends_with_period.group(0)
# Checks for 3 conditions if
# i. the prefix is a token made up of chars within the IsAlpha
# ii. the prefix is in the list of nonbreaking prefixes and
# does not contain #NUMERIC_ONLY#
# iii. the token is not the last token and that the
# next token contains all lowercase.
if (
(prefix and self.isalpha(prefix))
or (
prefix in self.NONBREAKING_PREFIXES
and prefix not in self.NUMERIC_ONLY_PREFIXES
)
or (i != num_tokens - 1 and self.islower(tokens[i + 1]))
):
pass # No change to the token.
# Checks if the prefix is in NUMERIC_ONLY_PREFIXES
# and ensures that the next word is a digit.
elif prefix in self.NUMERIC_ONLY_PREFIXES and re.search(
r"^[0-9]+", token[i + 1]
):
pass # No change to the token.
else: # Otherwise, adds a space after the tokens before a dot.
tokens[i] = prefix + " ."
return " ".join(tokens) # Stitch the tokens back.
|
def handles_nonbreaking_prefixes(self, text):
# Splits the text into toknes to check for nonbreaking prefixes.
tokens = text.split()
num_tokens = len(tokens)
for i, token in enumerate(tokens):
# Checks if token ends with a fullstop.
token_ends_with_period = re.match(r"^(\S+)\.$", text)
if token_ends_with_period:
prefix = token_ends_with_period.group(0)
# Checks for 3 conditions if
# i. the prefix is a token made up of chars within the IsAlpha
# ii. the prefix is in the list of nonbreaking prefixes and
# does not contain #NUMERIC_ONLY#
# iii. the token is not the last token and that the
# next token contains all lowercase.
if (
(prefix and self.isalpha(prefix))
or (
prefix in self.NONBREAKING_PREFIXES
and prefix not in self.NUMERIC_ONLY_PREFIXES
)
or (i != num_tokens - 1 and self.islower(tokens[i + 1]))
):
pass # No change to the token.
# Checks if the prefix is in NUMERIC_ONLY_PREFIXES
# and ensures that the next word is a digit.
elif prefix in self.NUMERIC_ONLY_PREFIXES and re.match(
r"^[0-9]+", token[i + 1]
):
pass # No change to the token.
else: # Otherwise, adds a space after the tokens before a dot.
tokens[i] = prefix + " ."
return " ".join(tokens) # Stitch the tokens back.
|
https://github.com/nltk/nltk/issues/1551
|
$ python -c 'from nltk.tokenize.moses import MosesTokenizer; m = MosesTokenizer(); m.penn_tokenize("this aint funny")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "nltk/tokenize/moses.py", line 299, in penn_tokenize
text = re.sub(regexp, subsitution, text)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 155, in sub
return _compile(pattern, flags).sub(repl, string, count)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 251, in _compile
raise error, v # invalid expression
sre_constants.error: unbalanced parenthesis
|
sre_constants.error
|
def escape_xml(self, text):
for regexp, substitution in self.MOSES_ESCAPE_XML_REGEXES:
text = re.sub(regexp, substitution, text)
return text
|
def escape_xml(self, text):
for regexp, subsitution in self.MOSES_ESCAPE_XML_REGEXES:
text = re.sub(regexp, subsitution, text)
return text
|
https://github.com/nltk/nltk/issues/1551
|
$ python -c 'from nltk.tokenize.moses import MosesTokenizer; m = MosesTokenizer(); m.penn_tokenize("this aint funny")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "nltk/tokenize/moses.py", line 299, in penn_tokenize
text = re.sub(regexp, subsitution, text)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 155, in sub
return _compile(pattern, flags).sub(repl, string, count)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py", line 251, in _compile
raise error, v # invalid expression
sre_constants.error: unbalanced parenthesis
|
sre_constants.error
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.