after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def get_param_cached(self, key):
resolved_key = rospy.names.resolve_name(key)
try:
# check for value in the parameter server cache
return rospy.impl.paramserver.get_param_server_cache().get(resolved_key)
except KeyError:
# first access, make call to parameter server
code, msg, value = self.target.subscribeParam(
rospy.names.get_caller_id(), rospy.core.get_node_uri(), resolved_key
)
if code != 1: # unwrap value with Python semantics
raise KeyError(key)
# set the value in the cache so that it's marked as subscribed
rospy.impl.paramserver.get_param_server_cache().set(resolved_key, value)
if isinstance(value, dict) and not value:
raise KeyError(key)
return value
|
def get_param_cached(self, key):
resolved_key = rospy.names.resolve_name(key)
try:
# check for value in the parameter server cache
return rospy.impl.paramserver.get_param_server_cache().get(resolved_key)
except KeyError:
# first access, make call to parameter server
with self._lock:
code, msg, value = self.target.subscribeParam(
rospy.names.get_caller_id(), rospy.core.get_node_uri(), resolved_key
)
if code != 1: # unwrap value with Python semantics
raise KeyError(key)
# set the value in the cache so that it's marked as subscribed
rospy.impl.paramserver.get_param_server_cache().set(resolved_key, value)
if isinstance(value, dict) and not value:
raise KeyError(key)
return value
|
https://github.com/ros/ros_comm/issues/1913
|
[rospy.rosout][ERROR] 2020-03-18 22:57:59,303: Unable to report rosout: __exit__
Traceback (most recent call last):
File "/home/robomaker/workspace/applications/robot-application/dependencies/opt/ros/melodic/lib/python2.7/dist-packages/rospy/impl/rosout.py", line 91, in _rosout
disable_topics_ = rospy.get_param_cached("/rosout_disable_topics_generation", False)
File "/home/robomaker/workspace/applications/robot-application/dependencies/opt/ros/melodic/lib/python2.7/dist-packages/rospy/client.py", line 490, in get_param_cached
return _param_server.get_param_cached(param_name)
File "/home/robomaker/workspace/applications/robot-application/dependencies/opt/ros/melodic/lib/python2.7/dist-packages/rospy/msproxy.py", line 162, in get_param_cached
with self._lock:
AttributeError: __exit__
|
AttributeError
|
def _rostopic_list_group_by_host(master, pubs, subs):
"""
Build up maps for hostname to topic list per hostname
:returns: publishers host map, subscribers host map, ``{str: set(str)}, {str: set(str)}``
"""
def build_map(master, state, uricache):
tmap = {}
for topic, ttype, tnodes in state:
for p in tnodes:
if not p in uricache:
uricache[p] = master.lookupNode(p)
uri = uricache[p]
puri = urlparse(uri)
if not puri.hostname in tmap:
tmap[puri.hostname] = []
# recreate the system state data structure, but for a single host
matches = [l for x, _, l in tmap[puri.hostname] if x == topic]
if matches:
matches[0].append(p)
else:
tmap[puri.hostname].append((topic, ttype, [p]))
return tmap
uricache = {}
host_pub_topics = build_map(master, pubs, uricache)
host_sub_topics = build_map(master, subs, uricache)
return host_pub_topics, host_sub_topics
|
def _rostopic_list_group_by_host(master, pubs, subs):
"""
Build up maps for hostname to topic list per hostname
:returns: publishers host map, subscribers host map, ``{str: set(str)}, {str: set(str)}``
"""
def build_map(master, state, uricache):
tmap = {}
for topic, ttype, tnodes in state:
for p in tnodes:
if not p in uricache:
uricache[p] = master.lookupNode(p)
uri = uricache[p]
puri = urlparse(uri)
if not puri.hostname in tmap:
tmap[puri.hostname] = []
# recreate the system state data structure, but for a single host
matches = [l for x, l in tmap[puri.hostname] if x == topic]
if matches:
matches[0].append(p)
else:
tmap[puri.hostname].append((topic, [p]))
return tmap
uricache = {}
host_pub_topics = build_map(master, pubs, uricache)
host_sub_topics = build_map(master, subs, uricache)
return host_pub_topics, host_sub_topics
|
https://github.com/ros/ros_comm/issues/1758
|
Host [vboxros]:
Traceback (most recent call last):
File "/opt/ros/melodic/bin/rostopic", line 35, in <module>
rostopic.rostopicmain()
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 2119, in rostopicmain
_rostopic_cmd_list(argv)
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 2059, in _rostopic_cmd_list
exitval = _rostopic_list(topic, verbose=options.verbose, subscribers_only=options.subscribers, publishers_only=options.publishers, group_by_host=options.hostname) or 0
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 1242, in _rostopic_list
verbose, indent=' ')
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 1150, in _sub_rostopic_list
topics = list(set([t for t, _, _ in pubs] + [t for t, _, _ in subs]))
ValueError: need more than 2 values to unpack
|
ValueError
|
def build_map(master, state, uricache):
tmap = {}
for topic, ttype, tnodes in state:
for p in tnodes:
if not p in uricache:
uricache[p] = master.lookupNode(p)
uri = uricache[p]
puri = urlparse(uri)
if not puri.hostname in tmap:
tmap[puri.hostname] = []
# recreate the system state data structure, but for a single host
matches = [l for x, _, l in tmap[puri.hostname] if x == topic]
if matches:
matches[0].append(p)
else:
tmap[puri.hostname].append((topic, ttype, [p]))
return tmap
|
def build_map(master, state, uricache):
tmap = {}
for topic, ttype, tnodes in state:
for p in tnodes:
if not p in uricache:
uricache[p] = master.lookupNode(p)
uri = uricache[p]
puri = urlparse(uri)
if not puri.hostname in tmap:
tmap[puri.hostname] = []
# recreate the system state data structure, but for a single host
matches = [l for x, l in tmap[puri.hostname] if x == topic]
if matches:
matches[0].append(p)
else:
tmap[puri.hostname].append((topic, [p]))
return tmap
|
https://github.com/ros/ros_comm/issues/1758
|
Host [vboxros]:
Traceback (most recent call last):
File "/opt/ros/melodic/bin/rostopic", line 35, in <module>
rostopic.rostopicmain()
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 2119, in rostopicmain
_rostopic_cmd_list(argv)
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 2059, in _rostopic_cmd_list
exitval = _rostopic_list(topic, verbose=options.verbose, subscribers_only=options.subscribers, publishers_only=options.publishers, group_by_host=options.hostname) or 0
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 1242, in _rostopic_list
verbose, indent=' ')
File "/opt/ros/melodic/lib/python2.7/dist-packages/rostopic/__init__.py", line 1150, in _sub_rostopic_list
topics = list(set([t for t, _, _ in pubs] + [t for t, _, _ in subs]))
ValueError: need more than 2 values to unpack
|
ValueError
|
def get_start_time(self):
"""
Returns the start time of the bag.
@return: a timestamp of the start of the bag
@rtype: float, timestamp in seconds, includes fractions of a second
"""
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
else:
if not self._connection_indexes:
raise ROSBagException("Bag contains no message")
start_stamps = [
index[0].time.to_sec()
for index in self._connection_indexes.values()
if index
]
start_stamp = min(start_stamps) if start_stamps else 0
return start_stamp
|
def get_start_time(self):
"""
Returns the start time of the bag.
@return: a timestamp of the start of the bag
@rtype: float, timestamp in seconds, includes fractions of a second
"""
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
else:
if not self._connection_indexes:
raise ROSBagException("Bag contains no message")
start_stamp = min(
[index[0].time.to_sec() for index in self._connection_indexes.values()]
)
return start_stamp
|
https://github.com/ros/ros_comm/issues/1099
|
$ unzip rosbag_reindex_issue.zip
$ rosbag info unindexed.bag
ERROR bag unindexed: unindexed.bag. Run rosbag reindex.
$ rosbag reindex unindexed_bag
$ rosbag info unindexed.bag
Traceback (most recent call last):
File "/path/redacted/rosbag/scripts/rosbag_bin.py", line 35, in <module>
rosbag.rosbagmain()
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 863, in rosbagmain
cmds[cmd](argv[2:])
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 149, in info_cmd
print(b)
File "/path/redacted/rosbag/src/rosbag/bag.py", line 628, in __str__
start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()])
IndexError: list index out of range
|
IndexError
|
def get_end_time(self):
"""
Returns the end time of the bag.
@return: a timestamp of the end of the bag
@rtype: float, timestamp in seconds, includes fractions of a second
"""
if self._chunks:
end_stamp = self._chunks[-1].end_time.to_sec()
else:
if not self._connection_indexes:
raise ROSBagException("Bag contains no message")
end_stamps = [
index[-1].time.to_sec()
for index in self._connection_indexes.values()
if index
]
end_stamp = max(end_stamps) if end_stamps else 0
return end_stamp
|
def get_end_time(self):
"""
Returns the end time of the bag.
@return: a timestamp of the end of the bag
@rtype: float, timestamp in seconds, includes fractions of a second
"""
if self._chunks:
end_stamp = self._chunks[-1].end_time.to_sec()
else:
if not self._connection_indexes:
raise ROSBagException("Bag contains no message")
end_stamp = max(
[index[-1].time.to_sec() for index in self._connection_indexes.values()]
)
return end_stamp
|
https://github.com/ros/ros_comm/issues/1099
|
$ unzip rosbag_reindex_issue.zip
$ rosbag info unindexed.bag
ERROR bag unindexed: unindexed.bag. Run rosbag reindex.
$ rosbag reindex unindexed_bag
$ rosbag info unindexed.bag
Traceback (most recent call last):
File "/path/redacted/rosbag/scripts/rosbag_bin.py", line 35, in <module>
rosbag.rosbagmain()
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 863, in rosbagmain
cmds[cmd](argv[2:])
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 149, in info_cmd
print(b)
File "/path/redacted/rosbag/src/rosbag/bag.py", line 628, in __str__
start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()])
IndexError: list index out of range
|
IndexError
|
def __str__(self):
rows = []
try:
if self._filename:
rows.append(("path", self._filename))
if self._version == 102 and type(self._reader) == _BagReader102_Unindexed:
rows.append(("version", "1.2 (unindexed)"))
else:
rows.append(
("version", "%d.%d" % (int(self._version / 100), self._version % 100))
)
if not self._connection_indexes and not self._chunks:
rows.append(("size", _human_readable_size(self.size)))
else:
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
end_stamp = self._chunks[-1].end_time.to_sec()
else:
start_stamps = [
index[0].time.to_sec()
for index in self._connection_indexes.values()
if index
]
start_stamp = min(start_stamps) if start_stamps else 0
end_stamps = [
index[-1].time.to_sec()
for index in self._connection_indexes.values()
if index
]
end_stamp = max(end_stamps) if end_stamps else 0
# Show duration
duration = end_stamp - start_stamp
dur_secs = duration % 60
dur_mins = int(duration / 60)
dur_hrs = int(dur_mins / 60)
if dur_hrs > 0:
dur_mins = dur_mins % 60
duration_str = "%dhr %d:%02ds (%ds)" % (
dur_hrs,
dur_mins,
dur_secs,
duration,
)
elif dur_mins > 0:
duration_str = "%d:%02ds (%ds)" % (dur_mins, dur_secs, duration)
else:
duration_str = "%.1fs" % duration
rows.append(("duration", duration_str))
# Show start and end times
rows.append(
("start", "%s (%.2f)" % (_time_to_str(start_stamp), start_stamp))
)
rows.append(("end", "%s (%.2f)" % (_time_to_str(end_stamp), end_stamp)))
rows.append(("size", _human_readable_size(self.size)))
if self._chunks:
num_messages = 0
for c in self._chunks:
for counts in c.connection_counts.values():
num_messages += counts
else:
num_messages = sum(
[len(index) for index in self._connection_indexes.values()]
)
rows.append(("messages", str(num_messages)))
# Show compression information
if len(self._chunk_headers) == 0:
rows.append(("compression", "none"))
else:
compression_counts = {}
compression_uncompressed = {}
compression_compressed = {}
for chunk_header in self._chunk_headers.values():
if chunk_header.compression not in compression_counts:
compression_counts[chunk_header.compression] = 1
compression_uncompressed[chunk_header.compression] = (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] = (
chunk_header.compressed_size
)
else:
compression_counts[chunk_header.compression] += 1
compression_uncompressed[chunk_header.compression] += (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] += (
chunk_header.compressed_size
)
chunk_count = len(self._chunk_headers)
compressions = []
for count, compression in reversed(
sorted([(v, k) for k, v in compression_counts.items()])
):
if compression != Compression.NONE:
fraction = (
100.0 * compression_compressed[compression]
) / compression_uncompressed[compression]
compressions.append(
"%s [%d/%d chunks; %.2f%%]"
% (compression, count, chunk_count, fraction)
)
else:
compressions.append(
"%s [%d/%d chunks]" % (compression, count, chunk_count)
)
rows.append(("compression", ", ".join(compressions)))
all_uncompressed = (
sum(
[
count
for c, count in compression_counts.items()
if c != Compression.NONE
]
)
== 0
)
if not all_uncompressed:
total_uncompressed_size = sum(
(h.uncompressed_size for h in self._chunk_headers.values())
)
total_compressed_size = sum(
(h.compressed_size for h in self._chunk_headers.values())
)
total_uncompressed_size_str = _human_readable_size(
total_uncompressed_size
)
total_compressed_size_str = _human_readable_size(
total_compressed_size
)
total_size_str_length = max(
[
len(total_uncompressed_size_str),
len(total_compressed_size_str),
]
)
if duration > 0:
uncompressed_rate_str = _human_readable_size(
total_uncompressed_size / duration
)
compressed_rate_str = _human_readable_size(
total_compressed_size / duration
)
rate_str_length = max(
[len(uncompressed_rate_str), len(compressed_rate_str)]
)
rows.append(
(
"uncompressed",
"%*s @ %*s/s"
% (
total_size_str_length,
total_uncompressed_size_str,
rate_str_length,
uncompressed_rate_str,
),
)
)
rows.append(
(
"compressed",
"%*s @ %*s/s (%.2f%%)"
% (
total_size_str_length,
total_compressed_size_str,
rate_str_length,
compressed_rate_str,
(100.0 * total_compressed_size)
/ total_uncompressed_size,
),
)
)
else:
rows.append(
(
"uncompressed",
"%*s"
% (total_size_str_length, total_uncompressed_size_str),
)
)
rows.append(
(
"compressed",
"%*s"
% (total_size_str_length, total_compressed_size_str),
)
)
datatypes = set()
datatype_infos = []
for connection in self._connections.values():
if connection.datatype in datatypes:
continue
datatype_infos.append(
(connection.datatype, connection.md5sum, connection.msg_def)
)
datatypes.add(connection.datatype)
topics = sorted(set([c.topic for c in self._get_connections()]))
topic_datatypes = {}
topic_conn_counts = {}
topic_msg_counts = {}
topic_freqs_median = {}
for topic in topics:
connections = list(self._get_connections(topic))
topic_datatypes[topic] = connections[0].datatype
topic_conn_counts[topic] = len(connections)
msg_count = 0
for connection in connections:
for chunk in self._chunks:
msg_count += chunk.connection_counts.get(connection.id, 0)
topic_msg_counts[topic] = msg_count
if self._connection_indexes_read:
stamps = [
entry.time.to_sec() for entry in self._get_entries(connections)
]
if len(stamps) > 1:
periods = [s1 - s0 for s1, s0 in zip(stamps[1:], stamps[:-1])]
med_period = _median(periods)
if med_period > 0.0:
topic_freqs_median[topic] = 1.0 / med_period
topics = sorted(topic_datatypes.keys())
max_topic_len = max([len(topic) for topic in topics])
max_datatype_len = max([len(datatype) for datatype in datatypes])
max_msg_count_len = max(
[len("%d" % msg_count) for msg_count in topic_msg_counts.values()]
)
max_freq_median_len = (
max(
[
len(_human_readable_frequency(freq))
for freq in topic_freqs_median.values()
]
)
if len(topic_freqs_median) > 0
else 0
)
# Show datatypes
for i, (datatype, md5sum, msg_def) in enumerate(sorted(datatype_infos)):
s = "%-*s [%s]" % (max_datatype_len, datatype, md5sum)
if i == 0:
rows.append(("types", s))
else:
rows.append(("", s))
# Show topics
for i, topic in enumerate(topics):
topic_msg_count = topic_msg_counts[topic]
s = "%-*s %*d %s" % (
max_topic_len,
topic,
max_msg_count_len,
topic_msg_count,
"msgs" if topic_msg_count > 1 else "msg ",
)
if topic in topic_freqs_median:
s += " @ %*s" % (
max_freq_median_len,
_human_readable_frequency(topic_freqs_median[topic]),
)
else:
s += " %*s" % (max_freq_median_len, "")
s += " : %-*s" % (max_datatype_len, topic_datatypes[topic])
if topic_conn_counts[topic] > 1:
s += " (%d connections)" % topic_conn_counts[topic]
if i == 0:
rows.append(("topics", s))
else:
rows.append(("", s))
except Exception as ex:
raise
first_column_width = max([len(field) for field, _ in rows]) + 1
s = ""
for field, value in rows:
if field:
s += "%-*s %s\n" % (first_column_width, field + ":", value)
else:
s += "%-*s %s\n" % (first_column_width, "", value)
return s.rstrip()
|
def __str__(self):
rows = []
try:
if self._filename:
rows.append(("path", self._filename))
if self._version == 102 and type(self._reader) == _BagReader102_Unindexed:
rows.append(("version", "1.2 (unindexed)"))
else:
rows.append(
("version", "%d.%d" % (int(self._version / 100), self._version % 100))
)
if not self._connection_indexes and not self._chunks:
rows.append(("size", _human_readable_size(self.size)))
else:
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
end_stamp = self._chunks[-1].end_time.to_sec()
else:
start_stamp = min(
[
index[0].time.to_sec()
for index in self._connection_indexes.values()
]
)
end_stamp = max(
[
index[-1].time.to_sec()
for index in self._connection_indexes.values()
]
)
# Show duration
duration = end_stamp - start_stamp
dur_secs = duration % 60
dur_mins = int(duration / 60)
dur_hrs = int(dur_mins / 60)
if dur_hrs > 0:
dur_mins = dur_mins % 60
duration_str = "%dhr %d:%02ds (%ds)" % (
dur_hrs,
dur_mins,
dur_secs,
duration,
)
elif dur_mins > 0:
duration_str = "%d:%02ds (%ds)" % (dur_mins, dur_secs, duration)
else:
duration_str = "%.1fs" % duration
rows.append(("duration", duration_str))
# Show start and end times
rows.append(
("start", "%s (%.2f)" % (_time_to_str(start_stamp), start_stamp))
)
rows.append(("end", "%s (%.2f)" % (_time_to_str(end_stamp), end_stamp)))
rows.append(("size", _human_readable_size(self.size)))
if self._chunks:
num_messages = 0
for c in self._chunks:
for counts in c.connection_counts.values():
num_messages += counts
else:
num_messages = sum(
[len(index) for index in self._connection_indexes.values()]
)
rows.append(("messages", str(num_messages)))
# Show compression information
if len(self._chunk_headers) == 0:
rows.append(("compression", "none"))
else:
compression_counts = {}
compression_uncompressed = {}
compression_compressed = {}
for chunk_header in self._chunk_headers.values():
if chunk_header.compression not in compression_counts:
compression_counts[chunk_header.compression] = 1
compression_uncompressed[chunk_header.compression] = (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] = (
chunk_header.compressed_size
)
else:
compression_counts[chunk_header.compression] += 1
compression_uncompressed[chunk_header.compression] += (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] += (
chunk_header.compressed_size
)
chunk_count = len(self._chunk_headers)
compressions = []
for count, compression in reversed(
sorted([(v, k) for k, v in compression_counts.items()])
):
if compression != Compression.NONE:
fraction = (
100.0 * compression_compressed[compression]
) / compression_uncompressed[compression]
compressions.append(
"%s [%d/%d chunks; %.2f%%]"
% (compression, count, chunk_count, fraction)
)
else:
compressions.append(
"%s [%d/%d chunks]" % (compression, count, chunk_count)
)
rows.append(("compression", ", ".join(compressions)))
all_uncompressed = (
sum(
[
count
for c, count in compression_counts.items()
if c != Compression.NONE
]
)
== 0
)
if not all_uncompressed:
total_uncompressed_size = sum(
(h.uncompressed_size for h in self._chunk_headers.values())
)
total_compressed_size = sum(
(h.compressed_size for h in self._chunk_headers.values())
)
total_uncompressed_size_str = _human_readable_size(
total_uncompressed_size
)
total_compressed_size_str = _human_readable_size(
total_compressed_size
)
total_size_str_length = max(
[
len(total_uncompressed_size_str),
len(total_compressed_size_str),
]
)
if duration > 0:
uncompressed_rate_str = _human_readable_size(
total_uncompressed_size / duration
)
compressed_rate_str = _human_readable_size(
total_compressed_size / duration
)
rate_str_length = max(
[len(uncompressed_rate_str), len(compressed_rate_str)]
)
rows.append(
(
"uncompressed",
"%*s @ %*s/s"
% (
total_size_str_length,
total_uncompressed_size_str,
rate_str_length,
uncompressed_rate_str,
),
)
)
rows.append(
(
"compressed",
"%*s @ %*s/s (%.2f%%)"
% (
total_size_str_length,
total_compressed_size_str,
rate_str_length,
compressed_rate_str,
(100.0 * total_compressed_size)
/ total_uncompressed_size,
),
)
)
else:
rows.append(
(
"uncompressed",
"%*s"
% (total_size_str_length, total_uncompressed_size_str),
)
)
rows.append(
(
"compressed",
"%*s"
% (total_size_str_length, total_compressed_size_str),
)
)
datatypes = set()
datatype_infos = []
for connection in self._connections.values():
if connection.datatype in datatypes:
continue
datatype_infos.append(
(connection.datatype, connection.md5sum, connection.msg_def)
)
datatypes.add(connection.datatype)
topics = sorted(set([c.topic for c in self._get_connections()]))
topic_datatypes = {}
topic_conn_counts = {}
topic_msg_counts = {}
topic_freqs_median = {}
for topic in topics:
connections = list(self._get_connections(topic))
topic_datatypes[topic] = connections[0].datatype
topic_conn_counts[topic] = len(connections)
msg_count = 0
for connection in connections:
for chunk in self._chunks:
msg_count += chunk.connection_counts.get(connection.id, 0)
topic_msg_counts[topic] = msg_count
if self._connection_indexes_read:
stamps = [
entry.time.to_sec() for entry in self._get_entries(connections)
]
if len(stamps) > 1:
periods = [s1 - s0 for s1, s0 in zip(stamps[1:], stamps[:-1])]
med_period = _median(periods)
if med_period > 0.0:
topic_freqs_median[topic] = 1.0 / med_period
topics = sorted(topic_datatypes.keys())
max_topic_len = max([len(topic) for topic in topics])
max_datatype_len = max([len(datatype) for datatype in datatypes])
max_msg_count_len = max(
[len("%d" % msg_count) for msg_count in topic_msg_counts.values()]
)
max_freq_median_len = (
max(
[
len(_human_readable_frequency(freq))
for freq in topic_freqs_median.values()
]
)
if len(topic_freqs_median) > 0
else 0
)
# Show datatypes
for i, (datatype, md5sum, msg_def) in enumerate(sorted(datatype_infos)):
s = "%-*s [%s]" % (max_datatype_len, datatype, md5sum)
if i == 0:
rows.append(("types", s))
else:
rows.append(("", s))
# Show topics
for i, topic in enumerate(topics):
topic_msg_count = topic_msg_counts[topic]
s = "%-*s %*d %s" % (
max_topic_len,
topic,
max_msg_count_len,
topic_msg_count,
"msgs" if topic_msg_count > 1 else "msg ",
)
if topic in topic_freqs_median:
s += " @ %*s" % (
max_freq_median_len,
_human_readable_frequency(topic_freqs_median[topic]),
)
else:
s += " %*s" % (max_freq_median_len, "")
s += " : %-*s" % (max_datatype_len, topic_datatypes[topic])
if topic_conn_counts[topic] > 1:
s += " (%d connections)" % topic_conn_counts[topic]
if i == 0:
rows.append(("topics", s))
else:
rows.append(("", s))
except Exception as ex:
raise
first_column_width = max([len(field) for field, _ in rows]) + 1
s = ""
for field, value in rows:
if field:
s += "%-*s %s\n" % (first_column_width, field + ":", value)
else:
s += "%-*s %s\n" % (first_column_width, "", value)
return s.rstrip()
|
https://github.com/ros/ros_comm/issues/1099
|
$ unzip rosbag_reindex_issue.zip
$ rosbag info unindexed.bag
ERROR bag unindexed: unindexed.bag. Run rosbag reindex.
$ rosbag reindex unindexed_bag
$ rosbag info unindexed.bag
Traceback (most recent call last):
File "/path/redacted/rosbag/scripts/rosbag_bin.py", line 35, in <module>
rosbag.rosbagmain()
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 863, in rosbagmain
cmds[cmd](argv[2:])
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 149, in info_cmd
print(b)
File "/path/redacted/rosbag/src/rosbag/bag.py", line 628, in __str__
start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()])
IndexError: list index out of range
|
IndexError
|
def _get_yaml_info(self, key=None):
s = ""
try:
if self._filename:
s += "path: %s\n" % self._filename
if self._version == 102 and type(self._reader) == _BagReader102_Unindexed:
s += "version: 1.2 (unindexed)\n"
else:
s += "version: %d.%d\n" % (int(self._version / 100), self._version % 100)
if not self._connection_indexes and not self._chunks:
s += "size: %d\n" % self.size
s += "indexed: False\n"
else:
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
end_stamp = self._chunks[-1].end_time.to_sec()
else:
start_stamps = [
index[0].time.to_sec()
for index in self._connection_indexes.values()
if index
]
start_stamp = min(start_stamps) if start_stamps else 0
end_stamps = [
index[-1].time.to_sec()
for index in self._connection_indexes.values()
if index
]
end_stamp = max(end_stamps) if end_stamps else 0
duration = end_stamp - start_stamp
s += "duration: %.6f\n" % duration
s += "start: %.6f\n" % start_stamp
s += "end: %.6f\n" % end_stamp
s += "size: %d\n" % self.size
if self._chunks:
num_messages = 0
for c in self._chunks:
for counts in c.connection_counts.values():
num_messages += counts
else:
num_messages = sum(
[len(index) for index in self._connection_indexes.values()]
)
s += "messages: %d\n" % num_messages
s += "indexed: True\n"
# Show compression information
if len(self._chunk_headers) == 0:
s += "compression: none\n"
else:
compression_counts = {}
compression_uncompressed = {}
compression_compressed = {}
for chunk_header in self._chunk_headers.values():
if chunk_header.compression not in compression_counts:
compression_counts[chunk_header.compression] = 1
compression_uncompressed[chunk_header.compression] = (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] = (
chunk_header.compressed_size
)
else:
compression_counts[chunk_header.compression] += 1
compression_uncompressed[chunk_header.compression] += (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] += (
chunk_header.compressed_size
)
chunk_count = len(self._chunk_headers)
main_compression_count, main_compression = list(
reversed(sorted([(v, k) for k, v in compression_counts.items()]))
)[0]
s += "compression: %s\n" % str(main_compression)
all_uncompressed = (
sum(
[
count
for c, count in compression_counts.items()
if c != Compression.NONE
]
)
== 0
)
if not all_uncompressed:
s += "uncompressed: %d\n" % sum(
(h.uncompressed_size for h in self._chunk_headers.values())
)
s += "compressed: %d\n" % sum(
(h.compressed_size for h in self._chunk_headers.values())
)
datatypes = set()
datatype_infos = []
for connection in self._connections.values():
if connection.datatype in datatypes:
continue
datatype_infos.append(
(connection.datatype, connection.md5sum, connection.msg_def)
)
datatypes.add(connection.datatype)
topics = sorted(set([c.topic for c in self._get_connections()]))
topic_datatypes = {}
topic_conn_counts = {}
topic_msg_counts = {}
topic_freqs_median = {}
for topic in topics:
connections = list(self._get_connections(topic))
topic_datatypes[topic] = connections[0].datatype
topic_conn_counts[topic] = len(connections)
msg_count = 0
for connection in connections:
for chunk in self._chunks:
msg_count += chunk.connection_counts.get(connection.id, 0)
topic_msg_counts[topic] = msg_count
if self._connection_indexes_read:
stamps = [
entry.time.to_sec() for entry in self._get_entries(connections)
]
if len(stamps) > 1:
periods = [s1 - s0 for s1, s0 in zip(stamps[1:], stamps[:-1])]
med_period = _median(periods)
if med_period > 0.0:
topic_freqs_median[topic] = 1.0 / med_period
topics = sorted(topic_datatypes.keys())
max_topic_len = max([len(topic) for topic in topics])
max_datatype_len = max([len(datatype) for datatype in datatypes])
max_msg_count_len = max(
[len("%d" % msg_count) for msg_count in topic_msg_counts.values()]
)
max_freq_median_len = (
max(
[
len(_human_readable_frequency(freq))
for freq in topic_freqs_median.values()
]
)
if len(topic_freqs_median) > 0
else 0
)
# Show datatypes
s += "types:\n"
for i, (datatype, md5sum, msg_def) in enumerate(sorted(datatype_infos)):
s += " - type: %s\n" % datatype
s += " md5: %s\n" % md5sum
# Show topics
s += "topics:\n"
for i, topic in enumerate(topics):
topic_msg_count = topic_msg_counts[topic]
s += " - topic: %s\n" % topic
s += " type: %s\n" % topic_datatypes[topic]
s += " messages: %d\n" % topic_msg_count
if topic_conn_counts[topic] > 1:
s += " connections: %d\n" % topic_conn_counts[topic]
if topic in topic_freqs_median:
s += " frequency: %.4f\n" % topic_freqs_median[topic]
if not key:
return s
class DictObject(object):
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(
self,
a,
[DictObject(x) if isinstance(x, dict) else x for x in b],
)
else:
setattr(self, a, DictObject(b) if isinstance(b, dict) else b)
obj = DictObject(yaml.load(s))
try:
val = eval("obj." + key)
except Exception as ex:
print('Error getting key "%s"' % key, file=sys.stderr)
return None
def print_yaml(val, indent=0):
indent_str = " " * indent
if type(val) is list:
s = ""
for item in val:
s += "%s- %s\n" % (indent_str, print_yaml(item, indent + 1))
return s
elif type(val) is DictObject:
s = ""
for i, (k, v) in enumerate(val.__dict__.items()):
if i != 0:
s += indent_str
s += "%s: %s" % (k, str(v))
if i < len(val.__dict__) - 1:
s += "\n"
return s
else:
return indent_str + str(val)
return print_yaml(val)
except Exception as ex:
raise
|
def _get_yaml_info(self, key=None):
s = ""
try:
if self._filename:
s += "path: %s\n" % self._filename
if self._version == 102 and type(self._reader) == _BagReader102_Unindexed:
s += "version: 1.2 (unindexed)\n"
else:
s += "version: %d.%d\n" % (int(self._version / 100), self._version % 100)
if not self._connection_indexes and not self._chunks:
s += "size: %d\n" % self.size
s += "indexed: False\n"
else:
if self._chunks:
start_stamp = self._chunks[0].start_time.to_sec()
end_stamp = self._chunks[-1].end_time.to_sec()
else:
start_stamp = min(
[
index[0].time.to_sec()
for index in self._connection_indexes.values()
]
)
end_stamp = max(
[
index[-1].time.to_sec()
for index in self._connection_indexes.values()
]
)
duration = end_stamp - start_stamp
s += "duration: %.6f\n" % duration
s += "start: %.6f\n" % start_stamp
s += "end: %.6f\n" % end_stamp
s += "size: %d\n" % self.size
if self._chunks:
num_messages = 0
for c in self._chunks:
for counts in c.connection_counts.values():
num_messages += counts
else:
num_messages = sum(
[len(index) for index in self._connection_indexes.values()]
)
s += "messages: %d\n" % num_messages
s += "indexed: True\n"
# Show compression information
if len(self._chunk_headers) == 0:
s += "compression: none\n"
else:
compression_counts = {}
compression_uncompressed = {}
compression_compressed = {}
for chunk_header in self._chunk_headers.values():
if chunk_header.compression not in compression_counts:
compression_counts[chunk_header.compression] = 1
compression_uncompressed[chunk_header.compression] = (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] = (
chunk_header.compressed_size
)
else:
compression_counts[chunk_header.compression] += 1
compression_uncompressed[chunk_header.compression] += (
chunk_header.uncompressed_size
)
compression_compressed[chunk_header.compression] += (
chunk_header.compressed_size
)
chunk_count = len(self._chunk_headers)
main_compression_count, main_compression = list(
reversed(sorted([(v, k) for k, v in compression_counts.items()]))
)[0]
s += "compression: %s\n" % str(main_compression)
all_uncompressed = (
sum(
[
count
for c, count in compression_counts.items()
if c != Compression.NONE
]
)
== 0
)
if not all_uncompressed:
s += "uncompressed: %d\n" % sum(
(h.uncompressed_size for h in self._chunk_headers.values())
)
s += "compressed: %d\n" % sum(
(h.compressed_size for h in self._chunk_headers.values())
)
datatypes = set()
datatype_infos = []
for connection in self._connections.values():
if connection.datatype in datatypes:
continue
datatype_infos.append(
(connection.datatype, connection.md5sum, connection.msg_def)
)
datatypes.add(connection.datatype)
topics = sorted(set([c.topic for c in self._get_connections()]))
topic_datatypes = {}
topic_conn_counts = {}
topic_msg_counts = {}
topic_freqs_median = {}
for topic in topics:
connections = list(self._get_connections(topic))
topic_datatypes[topic] = connections[0].datatype
topic_conn_counts[topic] = len(connections)
msg_count = 0
for connection in connections:
for chunk in self._chunks:
msg_count += chunk.connection_counts.get(connection.id, 0)
topic_msg_counts[topic] = msg_count
if self._connection_indexes_read:
stamps = [
entry.time.to_sec() for entry in self._get_entries(connections)
]
if len(stamps) > 1:
periods = [s1 - s0 for s1, s0 in zip(stamps[1:], stamps[:-1])]
med_period = _median(periods)
if med_period > 0.0:
topic_freqs_median[topic] = 1.0 / med_period
topics = sorted(topic_datatypes.keys())
max_topic_len = max([len(topic) for topic in topics])
max_datatype_len = max([len(datatype) for datatype in datatypes])
max_msg_count_len = max(
[len("%d" % msg_count) for msg_count in topic_msg_counts.values()]
)
max_freq_median_len = (
max(
[
len(_human_readable_frequency(freq))
for freq in topic_freqs_median.values()
]
)
if len(topic_freqs_median) > 0
else 0
)
# Show datatypes
s += "types:\n"
for i, (datatype, md5sum, msg_def) in enumerate(sorted(datatype_infos)):
s += " - type: %s\n" % datatype
s += " md5: %s\n" % md5sum
# Show topics
s += "topics:\n"
for i, topic in enumerate(topics):
topic_msg_count = topic_msg_counts[topic]
s += " - topic: %s\n" % topic
s += " type: %s\n" % topic_datatypes[topic]
s += " messages: %d\n" % topic_msg_count
if topic_conn_counts[topic] > 1:
s += " connections: %d\n" % topic_conn_counts[topic]
if topic in topic_freqs_median:
s += " frequency: %.4f\n" % topic_freqs_median[topic]
if not key:
return s
class DictObject(object):
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(
self,
a,
[DictObject(x) if isinstance(x, dict) else x for x in b],
)
else:
setattr(self, a, DictObject(b) if isinstance(b, dict) else b)
obj = DictObject(yaml.load(s))
try:
val = eval("obj." + key)
except Exception as ex:
print('Error getting key "%s"' % key, file=sys.stderr)
return None
def print_yaml(val, indent=0):
indent_str = " " * indent
if type(val) is list:
s = ""
for item in val:
s += "%s- %s\n" % (indent_str, print_yaml(item, indent + 1))
return s
elif type(val) is DictObject:
s = ""
for i, (k, v) in enumerate(val.__dict__.items()):
if i != 0:
s += indent_str
s += "%s: %s" % (k, str(v))
if i < len(val.__dict__) - 1:
s += "\n"
return s
else:
return indent_str + str(val)
return print_yaml(val)
except Exception as ex:
raise
|
https://github.com/ros/ros_comm/issues/1099
|
$ unzip rosbag_reindex_issue.zip
$ rosbag info unindexed.bag
ERROR bag unindexed: unindexed.bag. Run rosbag reindex.
$ rosbag reindex unindexed_bag
$ rosbag info unindexed.bag
Traceback (most recent call last):
File "/path/redacted/rosbag/scripts/rosbag_bin.py", line 35, in <module>
rosbag.rosbagmain()
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 863, in rosbagmain
cmds[cmd](argv[2:])
File "/path/redacted/rosbag/src/rosbag/rosbag_main.py", line 149, in info_cmd
print(b)
File "/path/redacted/rosbag/src/rosbag/bag.py", line 628, in __str__
start_stamp = min([index[ 0].time.to_sec() for index in self._connection_indexes.values()])
IndexError: list index out of range
|
IndexError
|
def rosmsg_cmd_show(mode, full, alias="show"):
cmd = "ros%s" % (mode[1:])
parser = OptionParser(usage="usage: %s %s [options] <%s>" % (cmd, alias, full))
parser.add_option(
"-r",
"--raw",
dest="raw",
default=False,
action="store_true",
help="show raw message text, including comments",
)
parser.add_option(
"-b",
"--bag",
dest="bag",
default=None,
help="show message from .bag file",
metavar="BAGFILE",
)
options, arg = _stdin_arg(parser, full)
if arg.endswith(mode):
arg = arg[: -(len(mode))]
# try to catch the user specifying code-style types and error
if "::" in arg:
parser.error(
cmd
+ " does not understand C++-style namespaces (i.e. '::').\nPlease refer to msg/srv types as 'package_name/Type'."
)
elif "." in arg:
parser.error(
"invalid message type '%s'.\nPlease refer to msg/srv types as 'package_name/Type'."
% arg
)
if options.bag:
bag_file = options.bag
if not os.path.exists(bag_file):
raise ROSMsgException("ERROR: bag file [%s] does not exist" % bag_file)
for topic, msg, t in rosbag.Bag(bag_file).read_messages(raw=True):
datatype, _, _, _, pytype = msg
if datatype == arg:
if options.raw:
print(pytype._full_text)
else:
context = genmsg.MsgContext.create_default()
msgs = generate_dynamic(datatype, pytype._full_text)
for t, msg in msgs.items():
context.register(t, msg._spec)
print(spec_to_str(context, msgs[datatype]._spec))
break
else:
rospack = rospkg.RosPack()
if "/" in arg: # package specified
rosmsg_debug(rospack, mode, arg, options.raw)
else:
found_msgs = list(rosmsg_search(rospack, mode, arg))
if not found_msgs:
print("Could not find msg '%s'" % arg, file=sys.stderr)
return 1
for found in found_msgs:
print("[%s]:" % found)
rosmsg_debug(rospack, mode, found, options.raw)
|
def rosmsg_cmd_show(mode, full, alias="show"):
cmd = "ros%s" % (mode[1:])
parser = OptionParser(usage="usage: %s %s [options] <%s>" % (cmd, alias, full))
parser.add_option(
"-r",
"--raw",
dest="raw",
default=False,
action="store_true",
help="show raw message text, including comments",
)
parser.add_option(
"-b",
"--bag",
dest="bag",
default=None,
help="show message from .bag file",
metavar="BAGFILE",
)
options, arg = _stdin_arg(parser, full)
if arg.endswith(mode):
arg = arg[: -(len(mode))]
# try to catch the user specifying code-style types and error
if "::" in arg:
parser.error(
cmd
+ " does not understand C++-style namespaces (i.e. '::').\nPlease refer to msg/srv types as 'package_name/Type'."
)
elif "." in arg:
parser.error(
"invalid message type '%s'.\nPlease refer to msg/srv types as 'package_name/Type'."
% arg
)
if options.bag:
bag_file = options.bag
if not os.path.exists(bag_file):
raise ROSMsgException("ERROR: bag file [%s] does not exist" % bag_file)
for topic, msg, t in rosbag.Bag(bag_file).read_messages(raw=True):
datatype, _, _, _, pytype = msg
if datatype == arg:
print(get_msg_text(datatype, options.raw, pytype._full_text))
break
else:
rospack = rospkg.RosPack()
if "/" in arg: # package specified
rosmsg_debug(rospack, mode, arg, options.raw)
else:
found_msgs = list(rosmsg_search(rospack, mode, arg))
if not found_msgs:
print("Could not find msg '%s'" % arg, file=sys.stderr)
return 1
for found in found_msgs:
print("[%s]:" % found)
rosmsg_debug(rospack, mode, found, options.raw)
|
https://github.com/ros/ros_comm/issues/1002
|
Traceback (most recent call last):
File "xxxx/ros_catkin_ws/install_isolated/bin/rosmsg", line 35, in <module>
rosmsg.rosmsgmain()
File "xxxx/ros_catkin_ws/install_isolated/lib/python2.7/site-packages/rosmsg/__init__.py", line 747, in rosmsgmain
sys.exit(rosmsg_cmd_show(ext, full, command))
File "xxxx/ros_catkin_ws/install_isolated/lib/python2.7/site-packages/rosmsg/__init__.py", line 607, in rosmsg_cmd_show
print(get_msg_text(datatype, options.raw, pytype._full_text))
File "xxxx/ros_catkin_ws/install_isolated/lib/python2.7/site-packages/rosmsg/__init__.py", line 425, in get_msg_text
for p in rospack.list():
AttributeError: 'str' object has no attribute 'list'
|
AttributeError
|
def custom_strify_message(
self,
val,
indent="",
time_offset=None,
current_time=None,
field_filter=None,
type_information=None,
fixed_numeric_width=None,
value_transform=None,
):
# ensure to print uint8[] as array of numbers instead of string
if type_information and type_information.startswith("uint8["):
val = [ord(x) for x in val]
if value_transform is not None:
val = value_transform(val, type_information)
return genpy.message.strify_message(
val,
indent=indent,
time_offset=time_offset,
current_time=current_time,
field_filter=field_filter,
fixed_numeric_width=fixed_numeric_width,
)
|
def custom_strify_message(
self,
val,
indent="",
time_offset=None,
current_time=None,
field_filter=None,
type_information=None,
fixed_numeric_width=None,
value_transform=None,
):
# ensure to print uint8[] as array of numbers instead of string
if type_information and type_information.startswith("uint8["):
val = [ord(x) for x in val]
if value_transform is not None:
val = value_transform(val)
return genpy.message.strify_message(
val,
indent=indent,
time_offset=time_offset,
current_time=current_time,
field_filter=field_filter,
fixed_numeric_width=fixed_numeric_width,
)
|
https://github.com/ros/ros_comm/issues/908
|
$ rostopic echo /rosout/msg -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'str' object has no attribute '__slots__'
$ rostopic echo /rosout/topics -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'list' object has no attribute '__slots__'
|
AttributeError
|
def create_value_transform(echo_nostr, echo_noarr):
def value_transform(val, type_information=None):
def transform_field_value(value, value_type, echo_nostr, echo_noarr):
if echo_noarr and "[" in value_type:
return "<array type: %s, length: %s>" % (
value_type.rstrip("[]"),
len(value),
)
elif echo_nostr and value_type == "string":
return "<string length: %s>" % len(value)
elif echo_nostr and value_type == "string[]":
return "<array type: string, length: %s>" % len(value)
return value
if not isinstance(val, genpy.Message):
if type_information is None:
return val
return transform_field_value(val, type_information, echo_nostr, echo_noarr)
class TransformedMessage(genpy.Message):
# These should be copy because changing these variables
# in transforming is problematic without its untransforming.
__slots__ = val.__slots__[:]
_slot_types = val._slot_types[:]
val_trans = TransformedMessage()
fields = val.__slots__
field_types = val._slot_types
for index, (f, t) in enumerate(zip(fields, field_types)):
f_val = getattr(val, f)
f_val_trans = transform_field_value(f_val, t, echo_nostr, echo_noarr)
if f_val_trans != f_val:
setattr(val_trans, f, f_val_trans)
val_trans._slot_types[index] = "string"
else:
try:
msg_class = genpy.message.get_message_class(t)
if msg_class is None:
# happens for list of ROS messages like std_msgs/String[]
raise ValueError
nested_transformed = value_transform(f_val)
setattr(val_trans, f, nested_transformed)
except ValueError:
setattr(val_trans, f, f_val)
return val_trans
return value_transform
|
def create_value_transform(echo_nostr, echo_noarr):
def value_transform(val):
class TransformedMessage(genpy.Message):
# These should be copy because changing these variables
# in transforming is problematic without its untransforming.
__slots__ = val.__slots__[:]
_slot_types = val._slot_types[:]
val_trans = TransformedMessage()
fields = val.__slots__
field_types = val._slot_types
for index, (f, t) in enumerate(zip(fields, field_types)):
f_val = getattr(val, f)
if echo_noarr and "[" in t:
setattr(
val_trans,
f,
"<array type: %s, length: %s>" % (t.rstrip("[]"), len(f_val)),
)
val_trans._slot_types[index] = "string"
elif echo_nostr and "string" in t:
setattr(val_trans, f, "<string length: %s>" % len(f_val))
else:
try:
msg_class = genpy.message.get_message_class(t)
if msg_class is None:
# happens for list of ROS messages like std_msgs/String[]
raise ValueError
nested_transformed = value_transform(f_val)
setattr(val_trans, f, nested_transformed)
except ValueError:
setattr(val_trans, f, f_val)
return val_trans
return value_transform
|
https://github.com/ros/ros_comm/issues/908
|
$ rostopic echo /rosout/msg -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'str' object has no attribute '__slots__'
$ rostopic echo /rosout/topics -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'list' object has no attribute '__slots__'
|
AttributeError
|
def value_transform(val, type_information=None):
def transform_field_value(value, value_type, echo_nostr, echo_noarr):
if echo_noarr and "[" in value_type:
return "<array type: %s, length: %s>" % (
value_type.rstrip("[]"),
len(value),
)
elif echo_nostr and value_type == "string":
return "<string length: %s>" % len(value)
elif echo_nostr and value_type == "string[]":
return "<array type: string, length: %s>" % len(value)
return value
if not isinstance(val, genpy.Message):
if type_information is None:
return val
return transform_field_value(val, type_information, echo_nostr, echo_noarr)
class TransformedMessage(genpy.Message):
# These should be copy because changing these variables
# in transforming is problematic without its untransforming.
__slots__ = val.__slots__[:]
_slot_types = val._slot_types[:]
val_trans = TransformedMessage()
fields = val.__slots__
field_types = val._slot_types
for index, (f, t) in enumerate(zip(fields, field_types)):
f_val = getattr(val, f)
f_val_trans = transform_field_value(f_val, t, echo_nostr, echo_noarr)
if f_val_trans != f_val:
setattr(val_trans, f, f_val_trans)
val_trans._slot_types[index] = "string"
else:
try:
msg_class = genpy.message.get_message_class(t)
if msg_class is None:
# happens for list of ROS messages like std_msgs/String[]
raise ValueError
nested_transformed = value_transform(f_val)
setattr(val_trans, f, nested_transformed)
except ValueError:
setattr(val_trans, f, f_val)
return val_trans
|
def value_transform(val):
class TransformedMessage(genpy.Message):
# These should be copy because changing these variables
# in transforming is problematic without its untransforming.
__slots__ = val.__slots__[:]
_slot_types = val._slot_types[:]
val_trans = TransformedMessage()
fields = val.__slots__
field_types = val._slot_types
for index, (f, t) in enumerate(zip(fields, field_types)):
f_val = getattr(val, f)
if echo_noarr and "[" in t:
setattr(
val_trans,
f,
"<array type: %s, length: %s>" % (t.rstrip("[]"), len(f_val)),
)
val_trans._slot_types[index] = "string"
elif echo_nostr and "string" in t:
setattr(val_trans, f, "<string length: %s>" % len(f_val))
else:
try:
msg_class = genpy.message.get_message_class(t)
if msg_class is None:
# happens for list of ROS messages like std_msgs/String[]
raise ValueError
nested_transformed = value_transform(f_val)
setattr(val_trans, f, nested_transformed)
except ValueError:
setattr(val_trans, f, f_val)
return val_trans
|
https://github.com/ros/ros_comm/issues/908
|
$ rostopic echo /rosout/msg -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'str' object has no attribute '__slots__'
$ rostopic echo /rosout/topics -n 1
Traceback (most recent call last):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 940, in callback
self.suffix + '\n')
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 876, in custom_strify_message
val = value_transform(val)
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1393, in value_transform
class TransformedMessage(genpy.Message):
File "<ws>/lib/python2.7/dist-packages/rostopic/__init__.py", line 1396, in TransformedMessage
__slots__ = val.__slots__[:]
AttributeError: 'list' object has no attribute '__slots__'
|
AttributeError
|
def check_roslaunch(f, use_test_depends=False):
"""
Check roslaunch file for errors, returning error message if check fails. This routine
is mainly to support rostest's roslaunch_check.
:param f: roslaunch file name, ``str``
:param use_test_depends: Consider test_depends, ``Bool``
:returns: error message or ``None``
"""
try:
rl_config = roslaunch.config.load_config_default(
[f], DEFAULT_MASTER_PORT, verbose=False
)
except roslaunch.core.RLException as e:
return str(e)
rospack = rospkg.RosPack()
errors = []
# check for missing deps
try:
base_pkg, file_deps, missing = roslaunch.depends.roslaunch_deps(
[f], use_test_depends=use_test_depends
)
except rospkg.common.ResourceNotFound as r:
errors.append("Could not find package [%s] included from [%s]" % (str(r), f))
missing = {}
file_deps = {}
except roslaunch.substitution_args.ArgException as e:
errors.append("Could not resolve arg [%s] in [%s]" % (str(e), f))
missing = {}
file_deps = {}
for pkg, miss in missing.items():
# even if the pkgs is not found in packges.xml, if other package.xml provdes that pkgs, then it will be ok
all_pkgs = []
try:
for file_dep in file_deps.keys():
file_pkg = rospkg.get_package_name(file_dep)
all_pkgs.extend(rospack.get_depends(file_pkg, implicit=False))
miss_all = list(set(miss) - set(all_pkgs))
except Exception as e:
print(e, file=sys.stderr)
miss_all = True
if miss_all:
print(
"Missing package dependencies: %s/package.xml: %s"
% (pkg, ", ".join(miss)),
file=sys.stderr,
)
errors.append(
"Missing package dependencies: %s/package.xml: %s"
% (pkg, ", ".join(miss))
)
elif miss:
print(
"Missing package dependencies: %s/package.xml: %s (notify upstream maintainer)"
% (pkg, ", ".join(miss)),
file=sys.stderr,
)
# load all node defs
nodes = []
for filename, rldeps in file_deps.items():
nodes.extend(rldeps.nodes)
# check for missing packages
for pkg, node_type in nodes:
try:
rospack.get_path(pkg)
except:
errors.append("cannot find package [%s] for node [%s]" % (pkg, node_type))
# check for missing nodes
for pkg, node_type in nodes:
try:
if not roslib.packages.find_node(pkg, node_type, rospack=rospack):
errors.append(
"cannot find node [%s] in package [%s]" % (node_type, pkg)
)
except Exception as e:
errors.append("unable to find node [%s/%s]: %s" % (pkg, node_type, str(e)))
# Check for configuration errors, #2889
for err in rl_config.config_errors:
errors.append("ROSLaunch config error: %s" % err)
if errors:
return "\n".join(errors)
|
def check_roslaunch(f, option_use_test_depends=False):
"""
Check roslaunch file for errors, returning error message if check fails. This routine
is mainly to support rostest's roslaunch_check.
:param f: roslaunch file name, ``str``
:param option_use_test_depends: Consider test_depends, ``Bool``
:returns: error message or ``None``
"""
try:
rl_config = roslaunch.config.load_config_default(
[f], DEFAULT_MASTER_PORT, verbose=False
)
except roslaunch.core.RLException as e:
return str(e)
rospack = rospkg.RosPack()
errors = []
# check for missing deps
try:
base_pkg, file_deps, missing = roslaunch.depends.roslaunch_deps(
[f], use_test_depends=option_use_test_depends
)
except rospkg.common.ResourceNotFound as r:
errors.append("Could not find package [%s] included from [%s]" % (str(r), f))
missing = {}
file_deps = {}
except roslaunch.substitution_args.ArgException as e:
errors.append("Could not resolve arg [%s] in [%s]" % (str(e), f))
missing = {}
file_deps = {}
for pkg, miss in missing.items():
# even if the pkgs is not found in packges.xml, if other package.xml provdes that pkgs, then it will be ok
all_pkgs = []
try:
for file_dep in file_deps.keys():
file_pkg = rospkg.get_package_name(file_dep)
all_pkgs.extend(rospack.get_depends(file_pkg, implicit=False))
miss_all = list(set(miss) - set(all_pkgs))
except Exception as e:
print(e, file=sys.stderr)
miss_all = True
if miss_all:
print(
"Missing package dependencies: %s/package.xml: %s"
% (pkg, ", ".join(miss)),
file=sys.stderr,
)
errors.append(
"Missing package dependencies: %s/package.xml: %s"
% (pkg, ", ".join(miss))
)
elif miss:
print(
"Missing package dependencies: %s/package.xml: %s (notify upstream maintainer)"
% (pkg, ", ".join(miss)),
file=sys.stderr,
)
# load all node defs
nodes = []
for filename, rldeps in file_deps.items():
nodes.extend(rldeps.nodes)
# check for missing packages
for pkg, node_type in nodes:
try:
rospack.get_path(pkg)
except:
errors.append("cannot find package [%s] for node [%s]" % (pkg, node_type))
# check for missing nodes
for pkg, node_type in nodes:
try:
if not roslib.packages.find_node(pkg, node_type, rospack=rospack):
errors.append(
"cannot find node [%s] in package [%s]" % (node_type, pkg)
)
except Exception as e:
errors.append("unable to find node [%s/%s]: %s" % (pkg, node_type, str(e)))
# Check for configuration errors, #2889
for err in rl_config.config_errors:
errors.append("ROSLaunch config error: %s" % err)
if errors:
return "\n".join(errors)
|
https://github.com/ros/ros_comm/issues/893
|
jliviero:~/ws/src/ros_comm$ rosrun roslaunch roslaunch-check -t ../launch/
checking *.launch in directory ../launch/
checking ../launch/demospace.launch
Traceback (most recent call last):
File "/home/jliviero/ws/src/ros_comm/tools/roslaunch/scripts/roslaunch-check", line 93, in <module>
error_msg = check_roslaunch_dir(roslaunch_path, use_test_depends=options.test_depends)
File "/home/jliviero/ws/src/ros_comm/tools/roslaunch/scripts/roslaunch-check", line 57, in check_roslaunch_dir
error_msgs.append(check_roslaunch_file(roslaunch_file, use_test_depends=use_test_depends))
File "/home/jliviero/ws/src/ros_comm/tools/roslaunch/scripts/roslaunch-check", line 46, in check_roslaunch_file
error_msg = roslaunch.rlutil.check_roslaunch(roslaunch_file, use_test_depends=use_test_depends)
TypeError: check_roslaunch() got an unexpected keyword argument 'use_test_depends'
|
TypeError
|
def robust_connect_subscriber(
conn, dest_addr, dest_port, pub_uri, receive_cb, resolved_topic_name
):
"""
Keeps trying to create connection for subscriber. Then passes off to receive_loop once connected.
"""
# kwc: this logic is not very elegant. I am waiting to rewrite
# the I/O loop with async i/o to clean this up.
# timeout is really generous. for now just choosing one that is large but not infinite
interval = 0.5
while conn.socket is None and not conn.done and not rospy.is_shutdown():
try:
conn.connect(dest_addr, dest_port, pub_uri, timeout=60.0)
except rospy.exceptions.TransportInitError as e:
# if the connection was closed intentionally
# because of an unknown error, stop trying
if conn.protocol is None:
conn.done = True
break
rospyerr(
"unable to create subscriber transport: %s. Will try again in %ss",
e,
interval,
)
interval = interval * 2
time.sleep(interval)
# check to see if publisher state has changed
conn.done = not check_if_still_publisher(resolved_topic_name, pub_uri)
if not conn.done:
conn.receive_loop(receive_cb)
|
def robust_connect_subscriber(
conn, dest_addr, dest_port, pub_uri, receive_cb, resolved_topic_name
):
"""
Keeps trying to create connection for subscriber. Then passes off to receive_loop once connected.
"""
# kwc: this logic is not very elegant. I am waiting to rewrite
# the I/O loop with async i/o to clean this up.
# timeout is really generous. for now just choosing one that is large but not infinite
interval = 0.5
while conn.socket is None and not conn.done and not rospy.is_shutdown():
try:
conn.connect(dest_addr, dest_port, pub_uri, timeout=60.0)
except rospy.exceptions.TransportInitError as e:
rospyerr(
"unable to create subscriber transport: %s. Will try again in %ss",
e,
interval,
)
interval = interval * 2
time.sleep(interval)
# check to see if publisher state has changed
conn.done = not check_if_still_publisher(resolved_topic_name, pub_uri)
if not conn.done:
conn.receive_loop(receive_cb)
|
https://github.com/ros/ros_comm/issues/533
|
[rospy.internal][WARNING] 2014-11-11 20:39:42,119: Unknown error initiating TCP/IP socket to pv1106:39758 (http://pv1106:34792/): Traceback (most recent call last):
File "/data/users/rlinsalata/dev/desk/bugs/10848_mem-leak/rosbridge/catkin_ws/src/ros_comm/clients/rospy/src/rospy/impl/tcpros_base.py", line 557, in connect
self.read_header()
File "/data/users/rlinsalata/dev/desk/bugs/10848_mem-leak/rosbridge/catkin_ws/src/ros_comm/clients/rospy/src/rospy/impl/tcpros_base.py", line 618, in read_header
self._validate_header(read_ros_handshake_header(sock, self.read_buff, self.protocol.buff_size))
AttributeError: 'NoneType' object has no attribute 'buff_size'
[rospy.internal][INFO] 2014-11-11 20:39:42,119: topic[/robot/limb/left/endpoint_state] removing connection to http://pv1106:34792/
[rospy.internal][ERROR] 2014-11-11 20:39:42,119: unable to create subscriber transport: 'NoneType' object has no attribute 'buff_size'. Will try again in 64.0s
|
AttributeError
|
def main(argv=sys.argv):
options = None
logger = None
try:
from . import rlutil
parser = _get_optparse()
(options, args) = parser.parse_args(argv[1:])
args = rlutil.resolve_launch_arguments(args)
_validate_args(parser, options, args)
# node args doesn't require any roslaunch infrastructure, so process it first
if any(
[
options.node_args,
options.node_list,
options.find_node,
options.dump_params,
options.file_list,
options.ros_args,
]
):
if options.node_args and not args:
parser.error("please specify a launch file")
from . import node_args
if options.node_args:
node_args.print_node_args(options.node_args, args)
elif options.find_node:
node_args.print_node_filename(options.find_node, args)
# Dump parameters, #2685
elif options.dump_params:
roslaunch_param_dump.dump_params(args)
elif options.file_list:
rlutil.print_file_list(args)
elif options.ros_args:
import arg_dump as roslaunch_arg_dump
roslaunch_arg_dump.dump_args(args)
else:
node_args.print_node_list(args)
return
# we have to wait for the master here because we don't have the run_id yet
if options.wait_for_master:
if options.core:
parser.error("--wait cannot be used with roscore")
rlutil._wait_for_master()
# write the pid to a file
write_pid_file(options.pid_fn, options.core, options.port)
# spin up the logging infrastructure. have to wait until we can read options.run_id
uuid = rlutil.get_or_generate_uuid(options.run_id, options.wait_for_master)
configure_logging(uuid)
# #3088: don't check disk usage on remote machines
if not options.child_name and not options.skip_log_check:
# #2761
rlutil.check_log_disk_usage()
logger = logging.getLogger("roslaunch")
logger.info("roslaunch starting with args %s" % str(argv))
logger.info("roslaunch env is %s" % os.environ)
if options.child_name:
logger.info("starting in child mode")
# This is a roslaunch child, spin up client server.
# client spins up an XML-RPC server that waits for
# commands and configuration from the server.
from . import child as roslaunch_child
c = roslaunch_child.ROSLaunchChild(
uuid, options.child_name, options.server_uri
)
c.run()
else:
logger.info("starting in server mode")
# #1491 change terminal name
if not options.disable_title:
rlutil.change_terminal_name(args, options.core)
# Read roslaunch string from stdin when - is passed as launch filename.
roslaunch_strs = []
if "-" in args:
roslaunch_core.printlog(
"Passed '-' as file argument, attempting to read roslaunch XML from stdin."
)
roslaunch_strs.append(sys.stdin.read())
roslaunch_core.printlog(
"... %d bytes read successfully.\n" % len(roslaunch_strs[-1])
)
args.remove("-")
# This is a roslaunch parent, spin up parent server and launch processes.
# args are the roslaunch files to load
from . import parent as roslaunch_parent
try:
# force a port binding spec if we are running a core
if options.core:
options.port = options.port or DEFAULT_MASTER_PORT
p = roslaunch_parent.ROSLaunchParent(
uuid,
args,
roslaunch_strs=roslaunch_strs,
is_core=options.core,
port=options.port,
local_only=options.local_only,
verbose=options.verbose,
force_screen=options.force_screen,
)
p.start()
p.spin()
finally:
# remove the pid file
if options.pid_fn:
try:
os.unlink(options.pid_fn)
except os.error:
pass
except RLException as e:
roslaunch_core.printerrlog(str(e))
roslaunch_core.printerrlog(
"The traceback for the exception was written to the log file"
)
if logger:
logger.error(traceback.format_exc())
sys.exit(1)
except ValueError as e:
# TODO: need to trap better than this high-level trap
roslaunch_core.printerrlog(str(e))
roslaunch_core.printerrlog(
"The traceback for the exception was written to the log file"
)
if logger:
logger.error(traceback.format_exc())
sys.exit(1)
except Exception as e:
traceback.print_exc()
sys.exit(1)
|
def main(argv=sys.argv):
options = None
try:
from . import rlutil
parser = _get_optparse()
(options, args) = parser.parse_args(argv[1:])
args = rlutil.resolve_launch_arguments(args)
_validate_args(parser, options, args)
# node args doesn't require any roslaunch infrastructure, so process it first
if any(
[
options.node_args,
options.node_list,
options.find_node,
options.dump_params,
options.file_list,
options.ros_args,
]
):
if options.node_args and not args:
parser.error("please specify a launch file")
from . import node_args
if options.node_args:
node_args.print_node_args(options.node_args, args)
elif options.find_node:
node_args.print_node_filename(options.find_node, args)
# Dump parameters, #2685
elif options.dump_params:
roslaunch_param_dump.dump_params(args)
elif options.file_list:
rlutil.print_file_list(args)
elif options.ros_args:
import arg_dump as roslaunch_arg_dump
roslaunch_arg_dump.dump_args(args)
else:
node_args.print_node_list(args)
return
# we have to wait for the master here because we don't have the run_id yet
if options.wait_for_master:
if options.core:
parser.error("--wait cannot be used with roscore")
rlutil._wait_for_master()
# write the pid to a file
write_pid_file(options.pid_fn, options.core, options.port)
# spin up the logging infrastructure. have to wait until we can read options.run_id
uuid = rlutil.get_or_generate_uuid(options.run_id, options.wait_for_master)
configure_logging(uuid)
# #3088: don't check disk usage on remote machines
if not options.child_name and not options.skip_log_check:
# #2761
rlutil.check_log_disk_usage()
logger = logging.getLogger("roslaunch")
logger.info("roslaunch starting with args %s" % str(argv))
logger.info("roslaunch env is %s" % os.environ)
if options.child_name:
logger.info("starting in child mode")
# This is a roslaunch child, spin up client server.
# client spins up an XML-RPC server that waits for
# commands and configuration from the server.
from . import child as roslaunch_child
c = roslaunch_child.ROSLaunchChild(
uuid, options.child_name, options.server_uri
)
c.run()
else:
logger.info("starting in server mode")
# #1491 change terminal name
if not options.disable_title:
rlutil.change_terminal_name(args, options.core)
# Read roslaunch string from stdin when - is passed as launch filename.
roslaunch_strs = []
if "-" in args:
roslaunch_core.printlog(
"Passed '-' as file argument, attempting to read roslaunch XML from stdin."
)
roslaunch_strs.append(sys.stdin.read())
roslaunch_core.printlog(
"... %d bytes read successfully.\n" % len(roslaunch_strs[-1])
)
args.remove("-")
# This is a roslaunch parent, spin up parent server and launch processes.
# args are the roslaunch files to load
from . import parent as roslaunch_parent
try:
# force a port binding spec if we are running a core
if options.core:
options.port = options.port or DEFAULT_MASTER_PORT
p = roslaunch_parent.ROSLaunchParent(
uuid,
args,
roslaunch_strs=roslaunch_strs,
is_core=options.core,
port=options.port,
local_only=options.local_only,
verbose=options.verbose,
force_screen=options.force_screen,
)
p.start()
p.spin()
finally:
# remove the pid file
if options.pid_fn:
try:
os.unlink(options.pid_fn)
except os.error:
pass
except RLException as e:
roslaunch_core.printerrlog(str(e))
roslaunch_core.printerrlog(
"The traceback for the exception was written to the log file"
)
logger.error(traceback.format_exc())
sys.exit(1)
except ValueError as e:
# TODO: need to trap better than this high-level trap
roslaunch_core.printerrlog(str(e))
roslaunch_core.printerrlog(
"The traceback for the exception was written to the log file"
)
logger.error(traceback.format_exc())
sys.exit(1)
except Exception as e:
traceback.print_exc()
sys.exit(1)
|
https://github.com/ros/ros_comm/issues/490
|
ros@host1:~$ roslaunch openni2_launch openni2.launch
[openni2.launch] is neither a launch file in package [openni2_launch] nor is [openni2_launch] a launch file name
The traceback for the exception was written to the log file
Traceback (most recent call last):openni2_launch
File "/opt/ros/indigo/bin/roslaunch", line 35, in <module>
roslaunch.main()
File "/opt/ros/indigo/lib/python2.7/dist-packages/roslaunch/__init__.py", line 292, in main
logger.error(traceback.format_exc())
UnboundLocalError: local variable 'logger' referenced before assignment
|
UnboundLocalError
|
def _run(self):
while not self._connection.done:
queue = []
with self._lock:
# wait for available data
while not self._queue and not self._connection.done:
self._waiting = True
self._cond_data_available.wait(1.0)
self._waiting = False
if self._queue:
self._cond_queue_swapped.notify()
# take all data from queue for processing outside of the lock
if self._queue:
queue = self._queue
self._queue = []
# relay all data
for data in queue:
try:
self._connection.write_data(data)
except Exception as e:
with self._lock:
self._error = e
|
def _run(self):
while not self._connection.done:
queue = []
with self._lock:
# wait for available data
while not self._queue and not self._connection.done:
self._waiting = True
self._cond_data_available.wait(1.0)
self._waiting = False
if self._queue:
self._cond_queue_swapped.notify()
# take all data from queue for processing outside of the lock
if self._queue:
queue = self._queue
self._queue = []
# relay all data
for data in queue:
try:
self._connection.write_data(data)
except Exception as e:
with self._cond:
self._error = e
|
https://github.com/ros/ros_comm/issues/369
|
Exception in thread Thread-23:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 504, in run
self.__target(*self.__args, **self.__kwargs)
File "/opt/ros/hydro/lib/python2.7/dist-packages/rospy/impl/tcpros_pubsub.py", line 431, in _run
with self._cond:
File "/opt/ros/hydro/lib/python2.7/dist-packages/rospy/impl/tcpros_pubsub.py", line 390, in __getattr__
return getattr(self._connection, name)
AttributeError: 'TCPROSTransport' object has no attribute '_cond'
|
AttributeError
|
def from_config(
cls,
config: Union[Dict[str, Any], Config] = {},
*,
vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(),
meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True,
validate: bool = True,
) -> "Language":
"""Create the nlp object from a loaded config. Will set up the tokenizer
and language data, add pipeline components etc. If no config is provided,
the default config of the given language is used.
config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable.
Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe.
exclude (Iterable[str]): Names of pipeline components to exclude.
Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based
on defaults and function argument annotations.
validate (bool): Validate the component config and arguments against
the types expected by the factory.
RETURNS (Language): The initialized Language class.
DOCS: https://spacy.io/api/language#from_config
"""
if auto_fill:
config = Config(cls.default_config, section_order=CONFIG_SECTION_ORDER).merge(
config
)
if "nlp" not in config:
raise ValueError(Errors.E985.format(config=config))
config_lang = config["nlp"].get("lang")
if config_lang is not None and config_lang != cls.lang:
raise ValueError(
Errors.E958.format(
bad_lang_code=config["nlp"]["lang"],
lang_code=cls.lang,
lang=util.get_object_name(cls),
)
)
config["nlp"]["lang"] = cls.lang
# This isn't very elegant, but we remove the [components] block here to prevent
# it from getting resolved (causes problems because we expect to pass in
# the nlp and name args for each component). If we're auto-filling, we're
# using the nlp.config with all defaults.
config = util.copy_config(config)
orig_pipeline = config.pop("components", {})
orig_pretraining = config.pop("pretraining", None)
config["components"] = {}
if auto_fill:
filled = registry.fill(config, validate=validate, schema=ConfigSchema)
else:
filled = config
filled["components"] = orig_pipeline
config["components"] = orig_pipeline
if orig_pretraining is not None:
filled["pretraining"] = orig_pretraining
config["pretraining"] = orig_pretraining
resolved_nlp = registry.resolve(
filled["nlp"], validate=validate, schema=ConfigSchemaNlp
)
create_tokenizer = resolved_nlp["tokenizer"]
before_creation = resolved_nlp["before_creation"]
after_creation = resolved_nlp["after_creation"]
after_pipeline_creation = resolved_nlp["after_pipeline_creation"]
lang_cls = cls
if before_creation is not None:
lang_cls = before_creation(cls)
if (
not isinstance(lang_cls, type)
or not issubclass(lang_cls, cls)
or lang_cls is not cls
):
raise ValueError(Errors.E943.format(value=type(lang_cls)))
# Warn about require_gpu usage in jupyter notebook
warn_if_jupyter_cupy()
# Note that we don't load vectors here, instead they get loaded explicitly
# inside stuff like the spacy train function. If we loaded them here,
# then we would load them twice at runtime: once when we make from config,
# and then again when we load from disk.
nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer, meta=meta)
if after_creation is not None:
nlp = after_creation(nlp)
if not isinstance(nlp, cls):
raise ValueError(Errors.E942.format(name="creation", value=type(nlp)))
# To create the components we need to use the final interpolated config
# so all values are available (if component configs use variables).
# Later we replace the component config with the raw config again.
interpolated = filled.interpolate() if not filled.is_interpolated else filled
pipeline = interpolated.get("components", {})
sourced = util.get_sourced_components(interpolated)
# If components are loaded from a source (existing models), we cache
# them here so they're only loaded once
source_nlps = {}
for pipe_name in config["nlp"]["pipeline"]:
if pipe_name not in pipeline:
opts = ", ".join(pipeline.keys())
raise ValueError(Errors.E956.format(name=pipe_name, opts=opts))
pipe_cfg = util.copy_config(pipeline[pipe_name])
raw_config = Config(filled["components"][pipe_name])
if pipe_name not in exclude:
if "factory" not in pipe_cfg and "source" not in pipe_cfg:
err = Errors.E984.format(name=pipe_name, config=pipe_cfg)
raise ValueError(err)
if "factory" in pipe_cfg:
factory = pipe_cfg.pop("factory")
# The pipe name (key in the config) here is the unique name
# of the component, not necessarily the factory
nlp.add_pipe(
factory,
name=pipe_name,
config=pipe_cfg,
validate=validate,
raw_config=raw_config,
)
else:
model = pipe_cfg["source"]
if model not in source_nlps:
# We only need the components here and we need to init
# model with the same vocab as the current nlp object
source_nlps[model] = util.load_model(model, vocab=nlp.vocab)
source_name = pipe_cfg.get("component", pipe_name)
nlp.add_pipe(source_name, source=source_nlps[model], name=pipe_name)
disabled_pipes = [*config["nlp"]["disabled"], *disable]
nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
nlp.batch_size = config["nlp"]["batch_size"]
nlp.config = filled if auto_fill else config
if after_pipeline_creation is not None:
nlp = after_pipeline_creation(nlp)
if not isinstance(nlp, cls):
raise ValueError(
Errors.E942.format(name="pipeline_creation", value=type(nlp))
)
# Detect components with listeners that are not frozen consistently
for name, proc in nlp.pipeline:
if getattr(proc, "listening_components", None): # e.g. tok2vec/transformer
for listener in proc.listening_components:
# If it's a component sourced from another pipeline, we check if
# the tok2vec listeners should be replaced with standalone tok2vec
# models (e.g. so component can be frozen without its performance
# degrading when other components/tok2vec are updated)
paths = sourced.get(listener, {}).get("replace_listeners", [])
if paths:
nlp.replace_listeners(name, listener, paths)
return nlp
|
def from_config(
cls,
config: Union[Dict[str, Any], Config] = {},
*,
vocab: Union[Vocab, bool] = True,
disable: Iterable[str] = SimpleFrozenList(),
exclude: Iterable[str] = SimpleFrozenList(),
meta: Dict[str, Any] = SimpleFrozenDict(),
auto_fill: bool = True,
validate: bool = True,
) -> "Language":
"""Create the nlp object from a loaded config. Will set up the tokenizer
and language data, add pipeline components etc. If no config is provided,
the default config of the given language is used.
config (Dict[str, Any] / Config): The loaded config.
vocab (Vocab): A Vocab object. If True, a vocab is created.
disable (Iterable[str]): Names of pipeline components to disable.
Disabled pipes will be loaded but they won't be run unless you
explicitly enable them by calling nlp.enable_pipe.
exclude (Iterable[str]): Names of pipeline components to exclude.
Excluded components won't be loaded.
meta (Dict[str, Any]): Meta overrides for nlp.meta.
auto_fill (bool): Automatically fill in missing values in config based
on defaults and function argument annotations.
validate (bool): Validate the component config and arguments against
the types expected by the factory.
RETURNS (Language): The initialized Language class.
DOCS: https://spacy.io/api/language#from_config
"""
if auto_fill:
config = Config(cls.default_config, section_order=CONFIG_SECTION_ORDER).merge(
config
)
if "nlp" not in config:
raise ValueError(Errors.E985.format(config=config))
config_lang = config["nlp"].get("lang")
if config_lang is not None and config_lang != cls.lang:
raise ValueError(
Errors.E958.format(
bad_lang_code=config["nlp"]["lang"],
lang_code=cls.lang,
lang=util.get_object_name(cls),
)
)
config["nlp"]["lang"] = cls.lang
# This isn't very elegant, but we remove the [components] block here to prevent
# it from getting resolved (causes problems because we expect to pass in
# the nlp and name args for each component). If we're auto-filling, we're
# using the nlp.config with all defaults.
config = util.copy_config(config)
orig_pipeline = config.pop("components", {})
orig_pretraining = config.pop("pretraining", None)
config["components"] = {}
if auto_fill:
filled = registry.fill(config, validate=validate, schema=ConfigSchema)
else:
filled = config
filled["components"] = orig_pipeline
config["components"] = orig_pipeline
if orig_pretraining is not None:
filled["pretraining"] = orig_pretraining
config["pretraining"] = orig_pretraining
resolved_nlp = registry.resolve(
filled["nlp"], validate=validate, schema=ConfigSchemaNlp
)
create_tokenizer = resolved_nlp["tokenizer"]
before_creation = resolved_nlp["before_creation"]
after_creation = resolved_nlp["after_creation"]
after_pipeline_creation = resolved_nlp["after_pipeline_creation"]
lang_cls = cls
if before_creation is not None:
lang_cls = before_creation(cls)
if (
not isinstance(lang_cls, type)
or not issubclass(lang_cls, cls)
or lang_cls is not cls
):
raise ValueError(Errors.E943.format(value=type(lang_cls)))
# Note that we don't load vectors here, instead they get loaded explicitly
# inside stuff like the spacy train function. If we loaded them here,
# then we would load them twice at runtime: once when we make from config,
# and then again when we load from disk.
nlp = lang_cls(vocab=vocab, create_tokenizer=create_tokenizer, meta=meta)
if after_creation is not None:
nlp = after_creation(nlp)
if not isinstance(nlp, cls):
raise ValueError(Errors.E942.format(name="creation", value=type(nlp)))
# To create the components we need to use the final interpolated config
# so all values are available (if component configs use variables).
# Later we replace the component config with the raw config again.
interpolated = filled.interpolate() if not filled.is_interpolated else filled
pipeline = interpolated.get("components", {})
sourced = util.get_sourced_components(interpolated)
# If components are loaded from a source (existing models), we cache
# them here so they're only loaded once
source_nlps = {}
for pipe_name in config["nlp"]["pipeline"]:
if pipe_name not in pipeline:
opts = ", ".join(pipeline.keys())
raise ValueError(Errors.E956.format(name=pipe_name, opts=opts))
pipe_cfg = util.copy_config(pipeline[pipe_name])
raw_config = Config(filled["components"][pipe_name])
if pipe_name not in exclude:
if "factory" not in pipe_cfg and "source" not in pipe_cfg:
err = Errors.E984.format(name=pipe_name, config=pipe_cfg)
raise ValueError(err)
if "factory" in pipe_cfg:
factory = pipe_cfg.pop("factory")
# The pipe name (key in the config) here is the unique name
# of the component, not necessarily the factory
nlp.add_pipe(
factory,
name=pipe_name,
config=pipe_cfg,
validate=validate,
raw_config=raw_config,
)
else:
model = pipe_cfg["source"]
if model not in source_nlps:
# We only need the components here and we need to init
# model with the same vocab as the current nlp object
source_nlps[model] = util.load_model(model, vocab=nlp.vocab)
source_name = pipe_cfg.get("component", pipe_name)
nlp.add_pipe(source_name, source=source_nlps[model], name=pipe_name)
disabled_pipes = [*config["nlp"]["disabled"], *disable]
nlp._disabled = set(p for p in disabled_pipes if p not in exclude)
nlp.batch_size = config["nlp"]["batch_size"]
nlp.config = filled if auto_fill else config
if after_pipeline_creation is not None:
nlp = after_pipeline_creation(nlp)
if not isinstance(nlp, cls):
raise ValueError(
Errors.E942.format(name="pipeline_creation", value=type(nlp))
)
# Detect components with listeners that are not frozen consistently
for name, proc in nlp.pipeline:
if getattr(proc, "listening_components", None): # e.g. tok2vec/transformer
for listener in proc.listening_components:
# If it's a component sourced from another pipeline, we check if
# the tok2vec listeners should be replaced with standalone tok2vec
# models (e.g. so component can be frozen without its performance
# degrading when other components/tok2vec are updated)
paths = sourced.get(listener, {}).get("replace_listeners", [])
if paths:
nlp.replace_listeners(name, listener, paths)
return nlp
|
https://github.com/explosion/spaCy/issues/6990
|
TypeError Traceback (most recent call last)
<ipython-input-12-66e94dc9d1fd> in <module>
1 sent = 'Hello World'
----> 2 doc = nlp(sent)
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/language.py in __call__(self, text, disable, component_cfg)
992 raise ValueError(Errors.E109.format(name=name)) from e
993 except Exception as e:
--> 994 error_handler(name, proc, [doc], e)
995 if doc is None:
996 raise ValueError(Errors.E005.format(name=name))
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/util.py in raise_error(proc_name, proc, docs, e)
1493
1494 def raise_error(proc_name, proc, docs, e):
-> 1495 raise e
1496
1497
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/language.py in __call__(self, text, disable, component_cfg)
987 error_handler = proc.get_error_handler()
988 try:
--> 989 doc = proc(doc, **component_cfg.get(name, {}))
990 except KeyError as e:
991 # This typically happens if a component is not initialized
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/pipeline/trainable_pipe.pyx in spacy.pipeline.trainable_pipe.TrainablePipe.__call__()
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/util.py in raise_error(proc_name, proc, docs, e)
1493
1494 def raise_error(proc_name, proc, docs, e):
-> 1495 raise e
1496
1497
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/pipeline/trainable_pipe.pyx in spacy.pipeline.trainable_pipe.TrainablePipe.__call__()
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy/pipeline/tagger.pyx in spacy.pipeline.tagger.Tagger.predict()
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/model.py in predict(self, X)
310 only the output, instead of the `(output, callback)` tuple.
311 """
--> 312 return self._func(self, X, is_train=False)[0]
313
314 def finish_update(self, optimizer: Optimizer) -> None:
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/layers/chain.py in forward(model, X, is_train)
52 callbacks = []
53 for layer in model.layers:
---> 54 Y, inc_layer_grad = layer(X, is_train=is_train)
55 callbacks.append(inc_layer_grad)
56 X = Y
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/model.py in __call__(self, X, is_train)
286 """Call the model's `forward` function, returning the output and a
287 callback to compute the gradients via backpropagation."""
--> 288 return self._func(self, X, is_train=is_train)
289
290 def initialize(self, X: Optional[InT] = None, Y: Optional[OutT] = None) -> "Model":
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/layers/chain.py in forward(model, X, is_train)
52 callbacks = []
53 for layer in model.layers:
---> 54 Y, inc_layer_grad = layer(X, is_train=is_train)
55 callbacks.append(inc_layer_grad)
56 X = Y
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/model.py in __call__(self, X, is_train)
286 """Call the model's `forward` function, returning the output and a
287 callback to compute the gradients via backpropagation."""
--> 288 return self._func(self, X, is_train=is_train)
289
290 def initialize(self, X: Optional[InT] = None, Y: Optional[OutT] = None) -> "Model":
~/anaconda3/envs/acl/lib/python3.8/site-packages/spacy_transformers/layers/trfs2arrays.py in forward(model, trf_datas, is_train)
26 src = model.ops.reshape2f(trf_data.tensors[t_i], -1, trf_data.width)
27 dst, get_d_src = apply_alignment(model.ops, trf_data.align, src)
---> 28 output, get_d_dst = pooling(dst, is_train)
29 outputs.append(output)
30 backprops.append((get_d_dst, get_d_src))
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/model.py in __call__(self, X, is_train)
286 """Call the model's `forward` function, returning the output and a
287 callback to compute the gradients via backpropagation."""
--> 288 return self._func(self, X, is_train=is_train)
289
290 def initialize(self, X: Optional[InT] = None, Y: Optional[OutT] = None) -> "Model":
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/layers/reduce_mean.py in forward(model, Xr, is_train)
16
17 def forward(model: Model[InT, OutT], Xr: InT, is_train: bool) -> Tuple[OutT, Callable]:
---> 18 Y = model.ops.reduce_mean(cast(Floats2d, Xr.data), Xr.lengths)
19 lengths = Xr.lengths
20
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/backends/numpy_ops.pyx in thinc.backends.numpy_ops.NumpyOps.reduce_mean()
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/backends/numpy_ops.cpython-38-x86_64-linux-gnu.so in View.MemoryView.memoryview_cwrapper()
~/anaconda3/envs/acl/lib/python3.8/site-packages/thinc/backends/numpy_ops.cpython-38-x86_64-linux-gnu.so in View.MemoryView.memoryview.__cinit__()
TypeError: a bytes-like object is required, not 'cupy.core.core.ndarray'
|
TypeError
|
def is_cython_func(func: Callable) -> bool:
"""Slightly hacky check for whether a callable is implemented in Cython.
Can be used to implement slightly different behaviors, especially around
inspecting and parameter annotations. Note that this will only return True
for actual cdef functions and methods, not regular Python functions defined
in Python modules.
func (Callable): The callable to check.
RETURNS (bool): Whether the callable is Cython (probably).
"""
attr = "__pyx_vtable__"
if hasattr(func, attr): # function or class instance
return True
# https://stackoverflow.com/a/55767059
if (
hasattr(func, "__qualname__")
and hasattr(func, "__module__")
and func.__module__ in sys.modules
): # method
cls_func = vars(sys.modules[func.__module__])[func.__qualname__.split(".")[0]]
return hasattr(cls_func, attr)
return False
|
def is_cython_func(func: Callable) -> bool:
"""Slightly hacky check for whether a callable is implemented in Cython.
Can be used to implement slightly different behaviors, especially around
inspecting and parameter annotations. Note that this will only return True
for actual cdef functions and methods, not regular Python functions defined
in Python modules.
func (Callable): The callable to check.
RETURNS (bool): Whether the callable is Cython (probably).
"""
attr = "__pyx_vtable__"
if hasattr(func, attr): # function or class instance
return True
# https://stackoverflow.com/a/55767059
if hasattr(func, "__qualname__") and hasattr(func, "__module__"): # method
cls_func = vars(sys.modules[func.__module__])[func.__qualname__.split(".")[0]]
return hasattr(cls_func, attr)
return False
|
https://github.com/explosion/spaCy/issues/7224
|
$ python -m spacy train test.cfg --code mycode.py
ℹ Using CPU
=========================== Initializing pipeline ===========================
Set up nlp object from config
Pipeline: ['lemmatizer']
Created vocabulary
Finished initializing nlp object
Traceback (most recent call last):
File "/usr/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/__main__.py", line 4, in <module>
setup_cli()
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/cli/_util.py", line 68, in setup_cli
command(prog_name=COMMAND)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/typer/main.py", line 497, in wrapper
return callback(**use_params) # type: ignore
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/cli/train.py", line 56, in train_cli
nlp = init_nlp(config, use_gpu=use_gpu)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/training/initialize.py", line 70, in init_nlp
nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer)
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/language.py", line 1243, in initialize
p_settings = validate_init_settings(
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/schemas.py", line 128, in validate_init_settings
schema = get_arg_model(func, exclude=exclude, name="InitArgModel")
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/schemas.py", line 100, in get_arg_model
default_empty = None if is_cython_func(func) else ...
File "/home/antti/spacy-custom-code/.venv/lib/python3.9/site-packages/spacy/util.py", line 1458, in is_cython_func
cls_func = vars(sys.modules[func.__module__])[func.__qualname__.split(".")[0]]
KeyError: 'python_code'
|
KeyError
|
def print_prf_per_type(
msg: Printer, scores: Dict[str, Dict[str, float]], name: str, type: str
) -> None:
data = []
for key, value in scores.items():
row = [key]
for k in ("p", "r", "f"):
v = value[k]
row.append(f"{v * 100:.2f}" if isinstance(v, (int, float)) else v)
data.append(row)
msg.table(
data,
header=("", "P", "R", "F"),
aligns=("l", "r", "r", "r"),
title=f"{name} (per {type})",
)
|
def print_prf_per_type(
msg: Printer, scores: Dict[str, Dict[str, float]], name: str, type: str
) -> None:
data = [
(k, f"{v['p'] * 100:.2f}", f"{v['r'] * 100:.2f}", f"{v['f'] * 100:.2f}")
for k, v in scores.items()
]
msg.table(
data,
header=("", "P", "R", "F"),
aligns=("l", "r", "r", "r"),
title=f"{name} (per {type})",
)
|
https://github.com/explosion/spaCy/issues/7019
|
Traceback (most recent call last):
File "/opt/miniconda3/envs/spacy/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/miniconda3/envs/spacy/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/__main__.py", line 4, in <module>
setup_cli()
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/_util.py", line 68, in setup_cli
command(prog_name=COMMAND)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/typer/main.py", line 497, in wrapper
return callback(**use_params) # type: ignore
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 50, in evaluate_cli
silent=False,
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 130, in evaluate
print_textcats_auc_per_cat(msg, scores["cats_auc_per_type"])
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 194, in print_textcats_auc_per_cat
[(k, f"{v:.2f}") for k, v in scores.items()],
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 194, in <listcomp>
[(k, f"{v:.2f}") for k, v in scores.items()],
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def print_textcats_auc_per_cat(
msg: Printer, scores: Dict[str, Dict[str, float]]
) -> None:
msg.table(
[
(k, f"{v:.2f}" if isinstance(v, (float, int)) else v)
for k, v in scores.items()
],
header=("", "ROC AUC"),
aligns=("l", "r"),
title="Textcat ROC AUC (per label)",
)
|
def print_textcats_auc_per_cat(
msg: Printer, scores: Dict[str, Dict[str, float]]
) -> None:
msg.table(
[(k, f"{v:.2f}") for k, v in scores.items()],
header=("", "ROC AUC"),
aligns=("l", "r"),
title="Textcat ROC AUC (per label)",
)
|
https://github.com/explosion/spaCy/issues/7019
|
Traceback (most recent call last):
File "/opt/miniconda3/envs/spacy/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/miniconda3/envs/spacy/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/__main__.py", line 4, in <module>
setup_cli()
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/_util.py", line 68, in setup_cli
command(prog_name=COMMAND)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/typer/main.py", line 497, in wrapper
return callback(**use_params) # type: ignore
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 50, in evaluate_cli
silent=False,
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 130, in evaluate
print_textcats_auc_per_cat(msg, scores["cats_auc_per_type"])
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 194, in print_textcats_auc_per_cat
[(k, f"{v:.2f}") for k, v in scores.items()],
File "/opt/miniconda3/envs/spacy/lib/python3.6/site-packages/spacy/cli/evaluate.py", line 194, in <listcomp>
[(k, f"{v:.2f}") for k, v in scores.items()],
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def get_loss(self, examples: Iterable[Example], sentence_encodings):
validate_examples(examples, "EntityLinker.get_loss")
entity_encodings = []
for eg in examples:
kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)
for ent in eg.reference.ents:
kb_id = kb_ids[ent.start]
if kb_id:
entity_encoding = self.kb.get_vector(kb_id)
entity_encodings.append(entity_encoding)
entity_encodings = self.model.ops.asarray(entity_encodings, dtype="float32")
if sentence_encodings.shape != entity_encodings.shape:
err = Errors.E147.format(method="get_loss", msg="gold entities do not match up")
raise RuntimeError(err)
gradients = self.distance.get_grad(sentence_encodings, entity_encodings)
loss = self.distance.get_loss(sentence_encodings, entity_encodings)
loss = loss / len(entity_encodings)
return float(loss), gradients
|
def get_loss(self, examples: Iterable[Example], sentence_encodings):
validate_examples(examples, "EntityLinker.get_loss")
entity_encodings = []
for eg in examples:
kb_ids = eg.get_aligned("ENT_KB_ID", as_string=True)
for ent in eg.reference.ents:
kb_id = kb_ids[ent.start]
if kb_id:
entity_encoding = self.kb.get_vector(kb_id)
entity_encodings.append(entity_encoding)
entity_encodings = self.model.ops.asarray(entity_encodings, dtype="float32")
if sentence_encodings.shape != entity_encodings.shape:
err = Errors.E147.format(method="get_loss", msg="gold entities do not match up")
raise RuntimeError(err)
gradients = self.distance.get_grad(sentence_encodings, entity_encodings)
loss = self.distance.get_loss(sentence_encodings, entity_encodings)
loss = loss / len(entity_encodings)
return loss, gradients
|
https://github.com/explosion/spaCy/issues/6826
|
Running command: /home/joozty/Documents/projects/tutorials/nel_emerson/venv/bin/python -m spacy train configs/nel.cfg --output training --paths.train corpus/train.spacy --paths.dev corpus/dev.spacy --paths.kb temp/my_kb --paths.base_nlp temp/my_nlp -c scripts/custom_functions.py -g 0
ℹ Using GPU: 0
=========================== Initializing pipeline ===========================
Set up nlp object from config
Pipeline: ['sentencizer', 'entity_ruler', 'entity_linker']
Created vocabulary
Finished initializing nlp object
Initialized pipeline components: ['entity_linker']
✔ Initialized pipeline
============================= Training pipeline =============================
ℹ Pipeline: ['sentencizer', 'entity_ruler', 'entity_linker']
ℹ Frozen components: ['sentencizer', 'entity_ruler']
ℹ Initial learn rate: 0.001
E # LOSS ENTIT... SENTS_F SENTS_P SENTS_R ENTS_F ENTS_P ENTS_R NEL_MICRO_F NEL_MICRO_R NEL_MICRO_P SCORE
--- ------ ------------- ------- ------- ------- ------ ------ ------ ----------- ----------- ----------- ------
0 0 2.85 100.00 100.00 100.00 16.67 16.67 16.67 33.33 33.33 33.33 0.49
⚠ Aborting and saving the final best model. Encountered exception:
TypeError('array(2.8528614, dtype=float32) is not JSON serializable')
Traceback (most recent call last):
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/training/loop.py", line 114, in train
raise e
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/training/loop.py", line 104, in train
save_checkpoint(is_best_checkpoint)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/training/loop.py", line 67, in save_checkpoint
before_to_disk(nlp).to_disk(output_path / DIR_MODEL_LAST)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/language.py", line 1662, in to_disk
util.to_disk(path, serializers, exclude)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/util.py", line 1127, in to_disk
writer(path / key)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/language.py", line 1653, in <lambda>
serializers["meta.json"] = lambda p: srsly.write_json(p, self.meta)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/srsly/_json_api.py", line 72, in write_json
json_data = json_dumps(data, indent=indent)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/srsly/_json_api.py", line 26, in json_dumps
result = ujson.dumps(data, indent=indent, escape_forward_slashes=False)
TypeError: array(2.8528614, dtype=float32) is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/__main__.py", line 4, in <module>
setup_cli()
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/cli/_util.py", line 65, in setup_cli
command(prog_name=COMMAND)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/typer/main.py", line 497, in wrapper
return callback(**use_params) # type: ignore
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/cli/train.py", line 59, in train_cli
train(nlp, output_path, use_gpu=use_gpu, stdout=sys.stdout, stderr=sys.stderr)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/training/loop.py", line 118, in train
save_checkpoint(False)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/training/loop.py", line 67, in save_checkpoint
before_to_disk(nlp).to_disk(output_path / DIR_MODEL_LAST)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/language.py", line 1662, in to_disk
util.to_disk(path, serializers, exclude)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/util.py", line 1127, in to_disk
writer(path / key)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/spacy/language.py", line 1653, in <lambda>
serializers["meta.json"] = lambda p: srsly.write_json(p, self.meta)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/srsly/_json_api.py", line 72, in write_json
json_data = json_dumps(data, indent=indent)
File "/home/joozty/Documents/projects/tutorials/nel_emerson/venv/lib/python3.8/site-packages/srsly/_json_api.py", line 26, in json_dumps
result = ujson.dumps(data, indent=indent, escape_forward_slashes=False)
TypeError: array(2.8528614, dtype=float32) is not JSON serializable
|
TypeError
|
def add_patterns(self, patterns):
"""Add patterns to the entitiy ruler. A pattern can either be a token
pattern (list of dicts) or a phrase pattern (string). For example:
{'label': 'ORG', 'pattern': 'Apple'}
{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
patterns (list): The patterns to add.
DOCS: https://spacy.io/api/entityruler#add_patterns
"""
# disable the nlp components after this one in case they hadn't been initialized / deserialised yet
try:
current_index = -1
for i, (name, pipe) in enumerate(self.nlp.pipeline):
if self == pipe:
current_index = i
break
subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index + 1 :]]
except ValueError:
subsequent_pipes = []
with self.nlp.disable_pipes(subsequent_pipes):
token_patterns = []
phrase_pattern_labels = []
phrase_pattern_texts = []
phrase_pattern_ids = []
for entry in patterns:
if isinstance(entry["pattern"], basestring_):
phrase_pattern_labels.append(entry["label"])
phrase_pattern_texts.append(entry["pattern"])
phrase_pattern_ids.append(entry.get("id"))
elif isinstance(entry["pattern"], list):
token_patterns.append(entry)
phrase_patterns = []
for label, pattern, ent_id in zip(
phrase_pattern_labels,
self.nlp.pipe(phrase_pattern_texts),
phrase_pattern_ids,
):
phrase_pattern = {"label": label, "pattern": pattern, "id": ent_id}
if ent_id:
phrase_pattern["id"] = ent_id
phrase_patterns.append(phrase_pattern)
for entry in token_patterns + phrase_patterns:
label = entry["label"]
if "id" in entry:
ent_label = label
label = self._create_label(label, entry["id"])
key = self.matcher._normalize_key(label)
self._ent_ids[key] = (ent_label, entry["id"])
pattern = entry["pattern"]
if isinstance(pattern, Doc):
self.phrase_patterns[label].append(pattern)
elif isinstance(pattern, list):
self.token_patterns[label].append(pattern)
else:
raise ValueError(Errors.E097.format(pattern=pattern))
for label, patterns in self.token_patterns.items():
self.matcher.add(label, patterns)
for label, patterns in self.phrase_patterns.items():
self.phrase_matcher.add(label, patterns)
|
def add_patterns(self, patterns):
"""Add patterns to the entitiy ruler. A pattern can either be a token
pattern (list of dicts) or a phrase pattern (string). For example:
{'label': 'ORG', 'pattern': 'Apple'}
{'label': 'GPE', 'pattern': [{'lower': 'san'}, {'lower': 'francisco'}]}
patterns (list): The patterns to add.
DOCS: https://spacy.io/api/entityruler#add_patterns
"""
# disable the nlp components after this one in case they hadn't been initialized / deserialised yet
try:
current_index = self.nlp.pipe_names.index(self.name)
subsequent_pipes = [pipe for pipe in self.nlp.pipe_names[current_index + 1 :]]
except ValueError:
subsequent_pipes = []
with self.nlp.disable_pipes(subsequent_pipes):
token_patterns = []
phrase_pattern_labels = []
phrase_pattern_texts = []
phrase_pattern_ids = []
for entry in patterns:
if isinstance(entry["pattern"], basestring_):
phrase_pattern_labels.append(entry["label"])
phrase_pattern_texts.append(entry["pattern"])
phrase_pattern_ids.append(entry.get("id"))
elif isinstance(entry["pattern"], list):
token_patterns.append(entry)
phrase_patterns = []
for label, pattern, ent_id in zip(
phrase_pattern_labels,
self.nlp.pipe(phrase_pattern_texts),
phrase_pattern_ids,
):
phrase_pattern = {"label": label, "pattern": pattern, "id": ent_id}
if ent_id:
phrase_pattern["id"] = ent_id
phrase_patterns.append(phrase_pattern)
for entry in token_patterns + phrase_patterns:
label = entry["label"]
if "id" in entry:
ent_label = label
label = self._create_label(label, entry["id"])
key = self.matcher._normalize_key(label)
self._ent_ids[key] = (ent_label, entry["id"])
pattern = entry["pattern"]
if isinstance(pattern, Doc):
self.phrase_patterns[label].append(pattern)
elif isinstance(pattern, list):
self.token_patterns[label].append(pattern)
else:
raise ValueError(Errors.E097.format(pattern=pattern))
for label, patterns in self.token_patterns.items():
self.matcher.add(label, patterns)
for label, patterns in self.phrase_patterns.items():
self.phrase_matcher.add(label, patterns)
|
https://github.com/explosion/spaCy/issues/6518
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.8/site-packages/spacy/pipeline/entityruler.py", line 222, in add_patterns
for label, pattern, ent_id in zip(
File "/usr/local/lib/python3.8/site-packages/spacy/language.py", line 829, in pipe
for doc in docs:
File "/usr/local/lib/python3.8/site-packages/spacy/language.py", line 1150, in _pipe
doc = proc(doc, **kwargs)
File "<stdin>", line 3, in custom_component
Exception: Called custom_component.
|
Exception
|
def __call__(self, string, univ_pos, morphology=None):
lookup_table = self.lookups.get_table("lemma_lookup", {})
if "lemma_rules" not in self.lookups:
return [lookup_table.get(string, string)]
if univ_pos in (NOUN, "NOUN", "noun"):
univ_pos = "noun"
elif univ_pos in (VERB, "VERB", "verb"):
univ_pos = "verb"
elif univ_pos in (ADJ, "ADJ", "adj"):
univ_pos = "adj"
elif univ_pos in (ADP, "ADP", "adp"):
univ_pos = "adp"
elif univ_pos in (ADV, "ADV", "adv"):
univ_pos = "adv"
elif univ_pos in (AUX, "AUX", "aux"):
univ_pos = "aux"
elif univ_pos in (CCONJ, "CCONJ", "cconj"):
univ_pos = "cconj"
elif univ_pos in (DET, "DET", "det"):
univ_pos = "det"
elif univ_pos in (PRON, "PRON", "pron"):
univ_pos = "pron"
elif univ_pos in (PUNCT, "PUNCT", "punct"):
univ_pos = "punct"
elif univ_pos in (SCONJ, "SCONJ", "sconj"):
univ_pos = "sconj"
else:
return [self.lookup(string)]
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
lemmas = self.lemmatize(
string,
index_table.get(univ_pos, {}),
exc_table.get(univ_pos, {}),
rules_table.get(univ_pos, []),
)
return lemmas
|
def __call__(self, string, univ_pos, morphology=None):
lookup_table = self.lookups.get_table("lemma_lookup", {})
if "lemma_rules" not in self.lookups:
return [lookup_table.get(string, string)]
if univ_pos in (NOUN, "NOUN", "noun"):
univ_pos = "noun"
elif univ_pos in (VERB, "VERB", "verb"):
univ_pos = "verb"
elif univ_pos in (ADJ, "ADJ", "adj"):
univ_pos = "adj"
elif univ_pos in (ADP, "ADP", "adp"):
univ_pos = "adp"
elif univ_pos in (ADV, "ADV", "adv"):
univ_pos = "adv"
elif univ_pos in (AUX, "AUX", "aux"):
univ_pos = "aux"
elif univ_pos in (CCONJ, "CCONJ", "cconj"):
univ_pos = "cconj"
elif univ_pos in (DET, "DET", "det"):
univ_pos = "det"
elif univ_pos in (PRON, "PRON", "pron"):
univ_pos = "pron"
elif univ_pos in (PUNCT, "PUNCT", "punct"):
univ_pos = "punct"
elif univ_pos in (SCONJ, "SCONJ", "sconj"):
univ_pos = "sconj"
else:
return [self.lookup(string)]
# See Issue #435 for example of where this logic is requied.
if self.is_base_form(univ_pos, morphology):
return list(set([string.lower()]))
index_table = self.lookups.get_table("lemma_index", {})
exc_table = self.lookups.get_table("lemma_exc", {})
rules_table = self.lookups.get_table("lemma_rules", {})
lemmas = self.lemmatize(
string,
index_table.get(univ_pos, {}),
exc_table.get(univ_pos, {}),
rules_table.get(univ_pos, []),
)
return lemmas
|
https://github.com/explosion/spaCy/issues/5728
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
in
2 nlp = spacy.load("fr_core_news_sm")
3
----> 4 doc = nlp("C'est une phrase.")
5 print([(w.text, w.pos_) for w in doc])
~/anaconda3/lib/python3.7/site-packages/spacy/language.py in __call__(self, text, disable, component_cfg)
447 if not hasattr(proc, "__call__"):
448 raise ValueError(Errors.E003.format(component=type(proc), name=name))
--> 449 doc = proc(doc, **component_cfg.get(name, {}))
450 if doc is None:
451 raise ValueError(Errors.E005.format(name=name))
pipes.pyx in spacy.pipeline.pipes.Tagger.__call__()
pipes.pyx in spacy.pipeline.pipes.Tagger.set_annotations()
morphology.pyx in spacy.morphology.Morphology.assign_tag_id()
morphology.pyx in spacy.morphology.Morphology.lemmatize()
~/anaconda3/lib/python3.7/site-packages/spacy/lang/fr/lemmatizer.py in __call__(self, string, univ_pos, morphology)
47 return [self.lookup(string)]
48 # See Issue #435 for example of where this logic is requied.
---> 49 if self.is_base_form(univ_pos, morphology):
50 return list(set([string.lower()]))
51 index_table = self.lookups.get_table("lemma_index", {})
TypeError: 'NoneType' object is not callable
|
TypeError
|
def load_model_from_path(model_path, meta=False, **overrides):
"""Load a model from a data directory path. Creates Language class with
pipeline from meta.json and then calls from_disk() with path."""
if not meta:
meta = get_model_meta(model_path)
# Support language factories registered via entry points (e.g. custom
# language subclass) while keeping top-level language identifier "lang"
lang = meta.get("lang_factory", meta["lang"])
cls = get_lang_class(lang)
nlp = cls(meta=meta, **overrides)
pipeline = meta.get("pipeline", [])
factories = meta.get("factories", {})
disable = overrides.get("disable", [])
if pipeline is True:
pipeline = nlp.Defaults.pipe_names
elif pipeline in (False, None):
pipeline = []
# skip "vocab" from overrides in component initialization since vocab is
# already configured from overrides when nlp is initialized above
if "vocab" in overrides:
del overrides["vocab"]
for name in pipeline:
if name not in disable:
config = meta.get("pipeline_args", {}).get(name, {})
config.update(overrides)
factory = factories.get(name, name)
component = nlp.create_pipe(factory, config=config)
nlp.add_pipe(component, name=name)
return nlp.from_disk(model_path, exclude=disable)
|
def load_model_from_path(model_path, meta=False, **overrides):
"""Load a model from a data directory path. Creates Language class with
pipeline from meta.json and then calls from_disk() with path."""
if not meta:
meta = get_model_meta(model_path)
# Support language factories registered via entry points (e.g. custom
# language subclass) while keeping top-level language identifier "lang"
lang = meta.get("lang_factory", meta["lang"])
cls = get_lang_class(lang)
nlp = cls(meta=meta, **overrides)
pipeline = meta.get("pipeline", [])
factories = meta.get("factories", {})
disable = overrides.get("disable", [])
if pipeline is True:
pipeline = nlp.Defaults.pipe_names
elif pipeline in (False, None):
pipeline = []
for name in pipeline:
if name not in disable:
config = meta.get("pipeline_args", {}).get(name, {})
config.update(overrides)
factory = factories.get(name, name)
component = nlp.create_pipe(factory, config=config)
nlp.add_pipe(component, name=name)
return nlp.from_disk(model_path, exclude=disable)
|
https://github.com/explosion/spaCy/issues/5620
|
py -m spacy train en C:\Work\ML\Spacy\dataset\model C:\Work\ML\Spacy\dataset\train C:\Work\ML\Spacy\dataset\valid -v en_core_web_md
Training pipeline: ['tagger', 'parser', 'ner']
Starting with blank model 'en'
Loading vector from model 'en_core_web_md'
Traceback (most recent call last):
File "C:\Program Files\Python\lib\runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Program Files\Python\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Program Files\Python\lib\site-packages\spacy\__main__.py", line 33, in <module>
plac.call(commands[command], sys.argv[1:])
File "C:\Program Files\Python\lib\site-packages\plac_core.py", line 367, in call
cmd, result = parser.consume(arglist)
File "C:\Program Files\Python\lib\site-packages\plac_core.py", line 232, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "C:\Program Files\Python\lib\site-packages\spacy\cli\train.py", line 266, in train
_load_vectors(nlp, vectors)
File "C:\Program Files\Python\lib\site-packages\spacy\cli\train.py", line 645, in _load_vectors
util.load_model(vectors, vocab=nlp.vocab)
File "C:\Program Files\Python\lib\site-packages\spacy\util.py", line 170, in load_model
return load_model_from_package(name, **overrides)
File "C:\Program Files\Python\lib\site-packages\spacy\util.py", line 191, in load_model_from_package
return cls.load(**overrides)
File "C:\Program Files\Python\lib\site-packages\en_core_web_md\__init__.py", line 12, in load
return load_model_from_init_py(__file__, **overrides)
File "C:\Program Files\Python\lib\site-packages\spacy\util.py", line 235, in load_model_from_init_py
return load_model_from_path(data_path, meta, **overrides)
File "C:\Program Files\Python\lib\site-packages\spacy\util.py", line 216, in load_model_from_path
component = nlp.create_pipe(factory, config=config)
File "C:\Program Files\Python\lib\site-packages\spacy\language.py", line 309, in create_pipe
return factory(self, **config)
File "C:\Program Files\Python\lib\site-packages\spacy\language.py", line 1080, in factory
return obj.from_nlp(nlp, **cfg)
File "pipes.pyx", line 62, in spacy.pipeline.pipes.Pipe.from_nlp
File "pipes.pyx", line 378, in spacy.pipeline.pipes.Tagger.__init__
TypeError: __init__() got multiple values for keyword argument 'vocab'
|
TypeError
|
def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
replace_components=False,
vectors=None,
width=96,
conv_depth=4,
cnn_window=1,
cnn_pieces=3,
use_chars=False,
bilstm_depth=0,
embed_rows=2000,
n_iter=30,
n_early_stopping=None,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None,
init_tok2vec=None,
parser_multitasks="",
entity_multitasks="",
noise_level=0.0,
orth_variant_level=0.0,
eval_beam_widths="",
gold_preproc=False,
learn_tokens=False,
textcat_multilabel=False,
textcat_arch="bow",
textcat_positive_label=None,
tag_map_path=None,
verbose=False,
debug=False,
):
"""
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
"""
util.fix_random_seed()
util.set_env_log(verbose)
# Make sure all files and paths exists if they are needed
train_path = util.ensure_path(train_path)
dev_path = util.ensure_path(dev_path)
meta_path = util.ensure_path(meta_path)
output_path = util.ensure_path(output_path)
if raw_text is not None:
raw_text = list(srsly.read_jsonl(raw_text))
if not train_path or not train_path.exists():
msg.fail("Training data not found", train_path, exits=1)
if not dev_path or not dev_path.exists():
msg.fail("Development data not found", dev_path, exits=1)
if meta_path is not None and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path) if meta_path else {}
if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:
msg.warn(
"Output directory is not empty",
"This can lead to unintended side effects when saving the model. "
"Please use an empty directory or a different path instead. If "
"the specified output path doesn't exist, the directory will be "
"created for you.",
)
if not output_path.exists():
output_path.mkdir()
msg.good("Created output directory: {}".format(output_path))
tag_map = {}
if tag_map_path is not None:
tag_map = srsly.read_json(tag_map_path)
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 100.0),
util.env_opt("batch_to", 1000.0),
util.env_opt("batch_compound", 1.001),
)
if not eval_beam_widths:
eval_beam_widths = [1]
else:
eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")]
if 1 not in eval_beam_widths:
eval_beam_widths.append(1)
eval_beam_widths.sort()
has_beam_widths = eval_beam_widths != [1]
# Set up the base model and pipeline. If a base model is specified, load
# the model and make sure the pipeline matches the pipeline setting. If
# training starts from a blank model, intitalize the language class.
pipeline = [p.strip() for p in pipeline.split(",")]
disabled_pipes = None
pipes_added = False
msg.text("Training pipeline: {}".format(pipeline))
if use_gpu >= 0:
activated_gpu = None
try:
activated_gpu = set_gpu(use_gpu)
except Exception as e:
msg.warn("Exception: {}".format(e))
if activated_gpu is not None:
msg.text("Using GPU: {}".format(use_gpu))
else:
msg.warn("Unable to activate GPU: {}".format(use_gpu))
msg.text("Using CPU only")
use_gpu = -1
if base_model:
msg.text("Starting with base model '{}'".format(base_model))
nlp = util.load_model(base_model)
if nlp.lang != lang:
msg.fail(
"Model language ('{}') doesn't match language specified as "
"`lang` argument ('{}') ".format(nlp.lang, lang),
exits=1,
)
for pipe in pipeline:
pipe_cfg = {}
if pipe == "parser":
pipe_cfg = {"learn_tokens": learn_tokens}
elif pipe == "textcat":
pipe_cfg = {
"exclusive_classes": not textcat_multilabel,
"architecture": textcat_arch,
"positive_label": textcat_positive_label,
}
if pipe not in nlp.pipe_names:
msg.text("Adding component to base model '{}'".format(pipe))
nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))
pipes_added = True
elif replace_components:
msg.text("Replacing component from base model '{}'".format(pipe))
nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))
pipes_added = True
else:
if pipe == "textcat":
textcat_cfg = nlp.get_pipe("textcat").cfg
base_cfg = {
"exclusive_classes": textcat_cfg["exclusive_classes"],
"architecture": textcat_cfg["architecture"],
"positive_label": textcat_cfg["positive_label"],
}
if base_cfg != pipe_cfg:
msg.fail(
"The base textcat model configuration does"
"not match the provided training options. "
"Existing cfg: {}, provided cfg: {}".format(
base_cfg, pipe_cfg
),
exits=1,
)
msg.text("Extending component from base model '{}'".format(pipe))
disabled_pipes = nlp.disable_pipes(
[p for p in nlp.pipe_names if p not in pipeline]
)
else:
msg.text("Starting with blank model '{}'".format(lang))
lang_cls = util.get_lang_class(lang)
nlp = lang_cls()
for pipe in pipeline:
if pipe == "parser":
pipe_cfg = {"learn_tokens": learn_tokens}
elif pipe == "textcat":
pipe_cfg = {
"exclusive_classes": not textcat_multilabel,
"architecture": textcat_arch,
"positive_label": textcat_positive_label,
}
else:
pipe_cfg = {}
nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))
# Update tag map with provided mapping
nlp.vocab.morphology.tag_map.update(tag_map)
if vectors:
msg.text("Loading vector from model '{}'".format(vectors))
_load_vectors(nlp, vectors)
# Multitask objectives
multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)]
for pipe_name, multitasks in multitask_options:
if multitasks:
if pipe_name not in pipeline:
msg.fail(
"Can't use multitask objective without '{}' in the pipeline".format(
pipe_name
)
)
pipe = nlp.get_pipe(pipe_name)
for objective in multitasks.split(","):
pipe.add_multitask_objective(objective)
# Prepare training corpus
msg.text("Counting training words (limit={})".format(n_examples))
corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
n_train_words = corpus.count_train()
if base_model and not pipes_added:
# Start with an existing model, use default optimizer
optimizer = create_default_optimizer(Model.ops)
else:
# Start with a blank model, call begin_training
cfg = {"device": use_gpu}
cfg["conv_depth"] = conv_depth
cfg["token_vector_width"] = width
cfg["bilstm_depth"] = bilstm_depth
cfg["cnn_maxout_pieces"] = cnn_pieces
cfg["embed_size"] = embed_rows
cfg["conv_window"] = cnn_window
cfg["subword_features"] = not use_chars
optimizer = nlp.begin_training(lambda: corpus.train_tuples, **cfg)
nlp._optimizer = None
# Load in pretrained weights
if init_tok2vec is not None:
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
msg.text("Loaded pretrained tok2vec for: {}".format(components))
# Verify textcat config
if "textcat" in pipeline:
textcat_labels = nlp.get_pipe("textcat").cfg.get("labels", [])
if textcat_positive_label and textcat_positive_label not in textcat_labels:
msg.fail(
"The textcat_positive_label (tpl) '{}' does not match any "
"label in the training data.".format(textcat_positive_label),
exits=1,
)
if textcat_positive_label and len(textcat_labels) != 2:
msg.fail(
"A textcat_positive_label (tpl) '{}' was provided for training "
"data that does not appear to be a binary classification "
"problem with two labels.".format(textcat_positive_label),
exits=1,
)
train_docs = corpus.train_docs(
nlp,
noise_level=noise_level,
gold_preproc=gold_preproc,
max_length=0,
ignore_misaligned=True,
)
train_labels = set()
if textcat_multilabel:
multilabel_found = False
for text, gold in train_docs:
train_labels.update(gold.cats.keys())
if list(gold.cats.values()).count(1.0) != 1:
multilabel_found = True
if not multilabel_found and not base_model:
msg.warn(
"The textcat training instances look like they have "
"mutually-exclusive classes. Remove the flag "
"'--textcat-multilabel' to train a classifier with "
"mutually-exclusive classes."
)
if not textcat_multilabel:
for text, gold in train_docs:
train_labels.update(gold.cats.keys())
if list(gold.cats.values()).count(1.0) != 1 and not base_model:
msg.warn(
"Some textcat training instances do not have exactly "
"one positive label. Modifying training options to "
"include the flag '--textcat-multilabel' for classes "
"that are not mutually exclusive."
)
nlp.get_pipe("textcat").cfg["exclusive_classes"] = False
textcat_multilabel = True
break
if base_model and set(textcat_labels) != train_labels:
msg.fail(
"Cannot extend textcat model using data with different "
"labels. Base model labels: {}, training data labels: "
"{}.".format(textcat_labels, list(train_labels)),
exits=1,
)
if textcat_multilabel:
msg.text(
"Textcat evaluation score: ROC AUC score macro-averaged across "
"the labels '{}'".format(", ".join(textcat_labels))
)
elif textcat_positive_label and len(textcat_labels) == 2:
msg.text(
"Textcat evaluation score: F1-score for the label '{}'".format(
textcat_positive_label
)
)
elif len(textcat_labels) > 1:
if len(textcat_labels) == 2:
msg.warn(
"If the textcat component is a binary classifier with "
"exclusive classes, provide '--textcat_positive_label' for "
"an evaluation on the positive class."
)
msg.text(
"Textcat evaluation score: F1-score macro-averaged across "
"the labels '{}'".format(", ".join(textcat_labels))
)
else:
msg.fail(
"Unsupported textcat configuration. Use `spacy debug-data` "
"for more information."
)
# fmt: off
row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)
row_widths = [len(w) for w in row_head]
row_settings = {"widths": row_widths, "aligns": tuple(["r" for i in row_head]), "spacing": 2}
# fmt: on
print("")
msg.row(row_head, **row_settings)
msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
try:
iter_since_best = 0
best_score = 0.0
for i in range(n_iter):
train_docs = corpus.train_docs(
nlp,
noise_level=noise_level,
orth_variant_level=orth_variant_level,
gold_preproc=gold_preproc,
max_length=0,
ignore_misaligned=True,
)
if raw_text:
random.shuffle(raw_text)
raw_batches = util.minibatch(
(nlp.make_doc(rt["text"]) for rt in raw_text), size=8
)
words_seen = 0
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in util.minibatch_by_words(train_docs, size=batch_sizes):
if not batch:
continue
docs, golds = zip(*batch)
try:
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
except ValueError as e:
err = "Error during training"
if init_tok2vec:
err += " Did you provide the same parameters during 'train' as during 'pretrain'?"
msg.fail(err, "Original error message: {}".format(e), exits=1)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
if not int(os.environ.get("LOG_FRIENDLY", 0)):
pbar.update(sum(len(doc) for doc in docs))
words_seen += sum(len(doc) for doc in docs)
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
for beam_width in eval_beam_widths:
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(
nlp_loaded,
gold_preproc=gold_preproc,
ignore_misaligned=True,
)
)
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, verbose=verbose)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(
nlp_loaded,
gold_preproc=gold_preproc,
ignore_misaligned=True,
)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, verbose=verbose)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
srsly.write_json(acc_loc, scorer.scores)
# Update model meta.json
meta["lang"] = nlp.lang
meta["pipeline"] = nlp.pipe_names
meta["spacy_version"] = ">=%s" % about.__version__
if beam_width == 1:
meta["speed"] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta.setdefault("accuracy", {})
for component in nlp.pipe_names:
for metric in _get_metrics(component):
meta["accuracy"][metric] = scorer.scores[metric]
else:
meta.setdefault("beam_accuracy", {})
meta.setdefault("beam_speed", {})
for component in nlp.pipe_names:
for metric in _get_metrics(component):
meta["beam_accuracy"][metric] = scorer.scores[metric]
meta["beam_speed"][beam_width] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
"name": nlp.vocab.vectors.name,
}
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
meta["labels"] = nlp.meta["labels"]
meta_loc = output_path / ("model%d" % i) / "meta.json"
srsly.write_json(meta_loc, meta)
util.set_env_log(verbose)
progress = _get_progress(
i,
losses,
scorer.scores,
output_stats,
beam_width=beam_width if has_beam_widths else None,
cpu_wps=cpu_wps,
gpu_wps=gpu_wps,
)
if i == 0 and "textcat" in pipeline:
textcats_per_cat = scorer.scores.get("textcats_per_cat", {})
for cat, cat_score in textcats_per_cat.items():
if cat_score.get("roc_auc_score", 0) < 0:
msg.warn(
"Textcat ROC AUC score is undefined due to "
"only one value in label '{}'.".format(cat)
)
msg.row(progress, **row_settings)
# Early stopping
if n_early_stopping is not None:
current_score = _score_for_model(meta)
if current_score < best_score:
iter_since_best += 1
else:
iter_since_best = 0
best_score = current_score
if iter_since_best >= n_early_stopping:
msg.text(
"Early stopping, best iteration is: {}".format(
i - iter_since_best
)
)
msg.text(
"Best score = {}; Final iteration score = {}".format(
best_score, current_score
)
)
break
except Exception as e:
msg.warn(
"Aborting and saving the final best model. "
"Encountered exception: {}".format(e)
)
finally:
best_pipes = nlp.pipe_names
if disabled_pipes:
disabled_pipes.restore()
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
meta_loc = output_path / "model-final" / "meta.json"
final_meta = srsly.read_json(meta_loc)
final_meta.setdefault("accuracy", {})
final_meta["accuracy"].update(meta.get("accuracy", {}))
final_meta.setdefault("speed", {})
final_meta["speed"].setdefault("cpu", None)
final_meta["speed"].setdefault("gpu", None)
meta.setdefault("speed", {})
meta["speed"].setdefault("cpu", None)
meta["speed"].setdefault("gpu", None)
# combine cpu and gpu speeds with the base model speeds
if final_meta["speed"]["cpu"] and meta["speed"]["cpu"]:
speed = _get_total_speed(
[final_meta["speed"]["cpu"], meta["speed"]["cpu"]]
)
final_meta["speed"]["cpu"] = speed
if final_meta["speed"]["gpu"] and meta["speed"]["gpu"]:
speed = _get_total_speed(
[final_meta["speed"]["gpu"], meta["speed"]["gpu"]]
)
final_meta["speed"]["gpu"] = speed
# if there were no speeds to update, overwrite with meta
if (
final_meta["speed"]["cpu"] is None
and final_meta["speed"]["gpu"] is None
):
final_meta["speed"].update(meta["speed"])
# note: beam speeds are not combined with the base model
if has_beam_widths:
final_meta.setdefault("beam_accuracy", {})
final_meta["beam_accuracy"].update(meta.get("beam_accuracy", {}))
final_meta.setdefault("beam_speed", {})
final_meta["beam_speed"].update(meta.get("beam_speed", {}))
srsly.write_json(meta_loc, final_meta)
msg.good("Saved model to output directory", final_model_path)
with msg.loading("Creating best model..."):
best_model_path = _collate_best_model(final_meta, output_path, best_pipes)
msg.good("Created best model", best_model_path)
|
def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
replace_components=False,
vectors=None,
width=96,
conv_depth=4,
cnn_window=1,
cnn_pieces=3,
use_chars=False,
bilstm_depth=0,
embed_rows=2000,
n_iter=30,
n_early_stopping=None,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None,
init_tok2vec=None,
parser_multitasks="",
entity_multitasks="",
noise_level=0.0,
orth_variant_level=0.0,
eval_beam_widths="",
gold_preproc=False,
learn_tokens=False,
textcat_multilabel=False,
textcat_arch="bow",
textcat_positive_label=None,
tag_map_path=None,
verbose=False,
debug=False,
):
"""
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
"""
util.fix_random_seed()
util.set_env_log(verbose)
# Make sure all files and paths exists if they are needed
train_path = util.ensure_path(train_path)
dev_path = util.ensure_path(dev_path)
meta_path = util.ensure_path(meta_path)
output_path = util.ensure_path(output_path)
if raw_text is not None:
raw_text = list(srsly.read_jsonl(raw_text))
if not train_path or not train_path.exists():
msg.fail("Training data not found", train_path, exits=1)
if not dev_path or not dev_path.exists():
msg.fail("Development data not found", dev_path, exits=1)
if meta_path is not None and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path) if meta_path else {}
if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:
msg.warn(
"Output directory is not empty",
"This can lead to unintended side effects when saving the model. "
"Please use an empty directory or a different path instead. If "
"the specified output path doesn't exist, the directory will be "
"created for you.",
)
if not output_path.exists():
output_path.mkdir()
msg.good("Created output directory: {}".format(output_path))
tag_map = {}
if tag_map_path is not None:
tag_map = srsly.read_json(tag_map_path)
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 100.0),
util.env_opt("batch_to", 1000.0),
util.env_opt("batch_compound", 1.001),
)
if not eval_beam_widths:
eval_beam_widths = [1]
else:
eval_beam_widths = [int(bw) for bw in eval_beam_widths.split(",")]
if 1 not in eval_beam_widths:
eval_beam_widths.append(1)
eval_beam_widths.sort()
has_beam_widths = eval_beam_widths != [1]
# Set up the base model and pipeline. If a base model is specified, load
# the model and make sure the pipeline matches the pipeline setting. If
# training starts from a blank model, intitalize the language class.
pipeline = [p.strip() for p in pipeline.split(",")]
disabled_pipes = None
pipes_added = False
msg.text("Training pipeline: {}".format(pipeline))
if use_gpu >= 0:
activated_gpu = None
try:
activated_gpu = set_gpu(use_gpu)
except Exception as e:
msg.warn("Exception: {}".format(e))
if activated_gpu is not None:
msg.text("Using GPU: {}".format(use_gpu))
else:
msg.warn("Unable to activate GPU: {}".format(use_gpu))
msg.text("Using CPU only")
use_gpu = -1
if base_model:
msg.text("Starting with base model '{}'".format(base_model))
nlp = util.load_model(base_model)
if nlp.lang != lang:
msg.fail(
"Model language ('{}') doesn't match language specified as "
"`lang` argument ('{}') ".format(nlp.lang, lang),
exits=1,
)
for pipe in pipeline:
pipe_cfg = {}
if pipe == "parser":
pipe_cfg = {"learn_tokens": learn_tokens}
elif pipe == "textcat":
pipe_cfg = {
"exclusive_classes": not textcat_multilabel,
"architecture": textcat_arch,
"positive_label": textcat_positive_label,
}
if pipe not in nlp.pipe_names:
msg.text("Adding component to base model '{}'".format(pipe))
nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))
pipes_added = True
elif replace_components:
msg.text("Replacing component from base model '{}'".format(pipe))
nlp.replace_pipe(pipe, nlp.create_pipe(pipe, config=pipe_cfg))
pipes_added = True
else:
if pipe == "textcat":
textcat_cfg = nlp.get_pipe("textcat").cfg
base_cfg = {
"exclusive_classes": textcat_cfg["exclusive_classes"],
"architecture": textcat_cfg["architecture"],
"positive_label": textcat_cfg["positive_label"],
}
if base_cfg != pipe_cfg:
msg.fail(
"The base textcat model configuration does"
"not match the provided training options. "
"Existing cfg: {}, provided cfg: {}".format(
base_cfg, pipe_cfg
),
exits=1,
)
msg.text("Extending component from base model '{}'".format(pipe))
disabled_pipes = nlp.disable_pipes(
[p for p in nlp.pipe_names if p not in pipeline]
)
else:
msg.text("Starting with blank model '{}'".format(lang))
lang_cls = util.get_lang_class(lang)
nlp = lang_cls()
for pipe in pipeline:
if pipe == "parser":
pipe_cfg = {"learn_tokens": learn_tokens}
elif pipe == "textcat":
pipe_cfg = {
"exclusive_classes": not textcat_multilabel,
"architecture": textcat_arch,
"positive_label": textcat_positive_label,
}
else:
pipe_cfg = {}
nlp.add_pipe(nlp.create_pipe(pipe, config=pipe_cfg))
# Update tag map with provided mapping
nlp.vocab.morphology.tag_map.update(tag_map)
if vectors:
msg.text("Loading vector from model '{}'".format(vectors))
_load_vectors(nlp, vectors)
# Multitask objectives
multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)]
for pipe_name, multitasks in multitask_options:
if multitasks:
if pipe_name not in pipeline:
msg.fail(
"Can't use multitask objective without '{}' in the pipeline".format(
pipe_name
)
)
pipe = nlp.get_pipe(pipe_name)
for objective in multitasks.split(","):
pipe.add_multitask_objective(objective)
# Prepare training corpus
msg.text("Counting training words (limit={})".format(n_examples))
corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
n_train_words = corpus.count_train()
if base_model and not pipes_added:
# Start with an existing model, use default optimizer
optimizer = create_default_optimizer(Model.ops)
else:
# Start with a blank model, call begin_training
cfg = {"device": use_gpu}
cfg["conv_depth"] = conv_depth
cfg["token_vector_width"] = width
cfg["bilstm_depth"] = bilstm_depth
cfg["cnn_maxout_pieces"] = cnn_pieces
cfg["embed_size"] = embed_rows
cfg["conv_window"] = cnn_window
cfg["subword_features"] = not use_chars
optimizer = nlp.begin_training(lambda: corpus.train_tuples, **cfg)
nlp._optimizer = None
# Load in pretrained weights
if init_tok2vec is not None:
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
msg.text("Loaded pretrained tok2vec for: {}".format(components))
# Verify textcat config
if "textcat" in pipeline:
textcat_labels = nlp.get_pipe("textcat").cfg.get("labels", [])
if textcat_positive_label and textcat_positive_label not in textcat_labels:
msg.fail(
"The textcat_positive_label (tpl) '{}' does not match any "
"label in the training data.".format(textcat_positive_label),
exits=1,
)
if textcat_positive_label and len(textcat_labels) != 2:
msg.fail(
"A textcat_positive_label (tpl) '{}' was provided for training "
"data that does not appear to be a binary classification "
"problem with two labels.".format(textcat_positive_label),
exits=1,
)
train_docs = corpus.train_docs(
nlp,
noise_level=noise_level,
gold_preproc=gold_preproc,
max_length=0,
ignore_misaligned=True,
)
train_labels = set()
if textcat_multilabel:
multilabel_found = False
for text, gold in train_docs:
train_labels.update(gold.cats.keys())
if list(gold.cats.values()).count(1.0) != 1:
multilabel_found = True
if not multilabel_found and not base_model:
msg.warn(
"The textcat training instances look like they have "
"mutually-exclusive classes. Remove the flag "
"'--textcat-multilabel' to train a classifier with "
"mutually-exclusive classes."
)
if not textcat_multilabel:
for text, gold in train_docs:
train_labels.update(gold.cats.keys())
if list(gold.cats.values()).count(1.0) != 1 and not base_model:
msg.warn(
"Some textcat training instances do not have exactly "
"one positive label. Modifying training options to "
"include the flag '--textcat-multilabel' for classes "
"that are not mutually exclusive."
)
nlp.get_pipe("textcat").cfg["exclusive_classes"] = False
textcat_multilabel = True
break
if base_model and set(textcat_labels) != train_labels:
msg.fail(
"Cannot extend textcat model using data with different "
"labels. Base model labels: {}, training data labels: "
"{}.".format(textcat_labels, list(train_labels)),
exits=1,
)
if textcat_multilabel:
msg.text(
"Textcat evaluation score: ROC AUC score macro-averaged across "
"the labels '{}'".format(", ".join(textcat_labels))
)
elif textcat_positive_label and len(textcat_labels) == 2:
msg.text(
"Textcat evaluation score: F1-score for the label '{}'".format(
textcat_positive_label
)
)
elif len(textcat_labels) > 1:
if len(textcat_labels) == 2:
msg.warn(
"If the textcat component is a binary classifier with "
"exclusive classes, provide '--textcat_positive_label' for "
"an evaluation on the positive class."
)
msg.text(
"Textcat evaluation score: F1-score macro-averaged across "
"the labels '{}'".format(", ".join(textcat_labels))
)
else:
msg.fail(
"Unsupported textcat configuration. Use `spacy debug-data` "
"for more information."
)
# fmt: off
row_head, output_stats = _configure_training_output(pipeline, use_gpu, has_beam_widths)
row_widths = [len(w) for w in row_head]
row_settings = {"widths": row_widths, "aligns": tuple(["r" for i in row_head]), "spacing": 2}
# fmt: on
print("")
msg.row(row_head, **row_settings)
msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
try:
iter_since_best = 0
best_score = 0.0
for i in range(n_iter):
train_docs = corpus.train_docs(
nlp,
noise_level=noise_level,
orth_variant_level=orth_variant_level,
gold_preproc=gold_preproc,
max_length=0,
ignore_misaligned=True,
)
if raw_text:
random.shuffle(raw_text)
raw_batches = util.minibatch(
(nlp.make_doc(rt["text"]) for rt in raw_text), size=8
)
words_seen = 0
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in util.minibatch_by_words(train_docs, size=batch_sizes):
if not batch:
continue
docs, golds = zip(*batch)
try:
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
except ValueError as e:
err = "Error during training"
if init_tok2vec:
err += " Did you provide the same parameters during 'train' as during 'pretrain'?"
msg.fail(err, "Original error message: {}".format(e), exits=1)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
if not int(os.environ.get("LOG_FRIENDLY", 0)):
pbar.update(sum(len(doc) for doc in docs))
words_seen += sum(len(doc) for doc in docs)
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
for beam_width in eval_beam_widths:
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(
nlp_loaded,
gold_preproc=gold_preproc,
ignore_misaligned=True,
)
)
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, verbose=verbose)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
for name, component in nlp_loaded.pipeline:
if hasattr(component, "cfg"):
component.cfg["beam_width"] = beam_width
dev_docs = list(
corpus.dev_docs(
nlp_loaded,
gold_preproc=gold_preproc,
ignore_misaligned=True,
)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, verbose=verbose)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
srsly.write_json(acc_loc, scorer.scores)
# Update model meta.json
meta["lang"] = nlp.lang
meta["pipeline"] = nlp.pipe_names
meta["spacy_version"] = ">=%s" % about.__version__
if beam_width == 1:
meta["speed"] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta.setdefault("accuracy", {})
for component in nlp.pipe_names:
for metric in _get_metrics(component):
meta["accuracy"][metric] = scorer.scores[metric]
else:
meta.setdefault("beam_accuracy", {})
meta.setdefault("beam_speed", {})
for component in nlp.pipe_names:
for metric in _get_metrics(component):
meta["beam_accuracy"][metric] = scorer.scores[metric]
meta["beam_speed"][beam_width] = {
"nwords": nwords,
"cpu": cpu_wps,
"gpu": gpu_wps,
}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
"name": nlp.vocab.vectors.name,
}
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
meta["labels"] = nlp.meta["labels"]
meta_loc = output_path / ("model%d" % i) / "meta.json"
srsly.write_json(meta_loc, meta)
util.set_env_log(verbose)
progress = _get_progress(
i,
losses,
scorer.scores,
output_stats,
beam_width=beam_width if has_beam_widths else None,
cpu_wps=cpu_wps,
gpu_wps=gpu_wps,
)
if i == 0 and "textcat" in pipeline:
textcats_per_cat = scorer.scores.get("textcats_per_cat", {})
for cat, cat_score in textcats_per_cat.items():
if cat_score.get("roc_auc_score", 0) < 0:
msg.warn(
"Textcat ROC AUC score is undefined due to "
"only one value in label '{}'.".format(cat)
)
msg.row(progress, **row_settings)
# Early stopping
if n_early_stopping is not None:
current_score = _score_for_model(meta)
if current_score < best_score:
iter_since_best += 1
else:
iter_since_best = 0
best_score = current_score
if iter_since_best >= n_early_stopping:
msg.text(
"Early stopping, best iteration is: {}".format(
i - iter_since_best
)
)
msg.text(
"Best score = {}; Final iteration score = {}".format(
best_score, current_score
)
)
break
except Exception as e:
msg.warn(
"Aborting and saving the final best model. "
"Encountered exception: {}".format(e)
)
finally:
best_pipes = nlp.pipe_names
if disabled_pipes:
disabled_pipes.restore()
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
meta_loc = output_path / "model-final" / "meta.json"
final_meta = srsly.read_json(meta_loc)
final_meta.setdefault("accuracy", {})
final_meta["accuracy"].update(meta.get("accuracy", {}))
final_meta.setdefault("speed", {})
final_meta["speed"].setdefault("cpu", None)
final_meta["speed"].setdefault("gpu", None)
# combine cpu and gpu speeds with the base model speeds
if final_meta["speed"]["cpu"] and meta["speed"]["cpu"]:
speed = _get_total_speed(
[final_meta["speed"]["cpu"], meta["speed"]["cpu"]]
)
final_meta["speed"]["cpu"] = speed
if final_meta["speed"]["gpu"] and meta["speed"]["gpu"]:
speed = _get_total_speed(
[final_meta["speed"]["gpu"], meta["speed"]["gpu"]]
)
final_meta["speed"]["gpu"] = speed
# if there were no speeds to update, overwrite with meta
if (
final_meta["speed"]["cpu"] is None
and final_meta["speed"]["gpu"] is None
):
final_meta["speed"].update(meta["speed"])
# note: beam speeds are not combined with the base model
if has_beam_widths:
final_meta.setdefault("beam_accuracy", {})
final_meta["beam_accuracy"].update(meta.get("beam_accuracy", {}))
final_meta.setdefault("beam_speed", {})
final_meta["beam_speed"].update(meta.get("beam_speed", {}))
srsly.write_json(meta_loc, final_meta)
msg.good("Saved model to output directory", final_model_path)
with msg.loading("Creating best model..."):
best_model_path = _collate_best_model(final_meta, output_path, best_pipes)
msg.good("Created best model", best_model_path)
|
https://github.com/explosion/spaCy/issues/5200
|
root@1c02094b4938:/project# spacy train \
--base-model en_core_oi_lg/en_core_oi_lg-0.0.3 \
--pipeline ner \
--n-iter 30 \
--n-early-stopping 5 \
en \
models \
data/ner/train \
data/ner/eval
✔ Created output directory: models
Training pipeline: ['ner']
Starting with base model 'en_core_oi_lg/en_core_oi_lg-0.0.3'
Extending component from base model 'ner'
Counting training words (limit=0)
Itn NER Loss NER P NER R NER F Token % CPU WPS
--- --------- ------ ------ ------ ------- -------
1 4857.485 67.342 58.462 62.588 92.721 17608
2 2574.152 74.643 68.901 71.657 92.721 19069
3 1822.187 77.081 74.286 75.658 92.721 19331
…
15 246.849 80.291 78.791 79.534 92.721 19075
16 242.276 80.518 78.571 79.533 92.721 18501
17 217.591 79.911 78.681 79.291 92.721 18839
Early stopping, best iteration is: 11
Best score = 79.919691339363; Final iteration score = 79.29442811710254
✔ Saved model to output directory
models/model-final
⠙ Creating best model...
Traceback (most recent call last):
File "/usr/local/lib/python3.8/runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/site-packages/spacy/__main__.py", line 33, in <module>
plac.call(commands[command], sys.argv[1:])
File "/usr/local/lib/python3.8/site-packages/plac_core.py", line 367, in call
cmd, result = parser.consume(arglist)
File "/usr/local/lib/python3.8/site-packages/plac_core.py", line 232, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 583, in train
best_model_path = _collate_best_model(final_meta, output_path, best_pipes)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 643, in _collate_best_model
bests[component] = _find_best(output_path, component)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 666, in _find_best
return max(accuracies)[1]
TypeError: '>' not supported between instances of 'dict' and 'dict'
|
TypeError
|
def _find_best(experiment_dir, component):
accuracies = []
for epoch_model in experiment_dir.iterdir():
if epoch_model.is_dir() and epoch_model.parts[-1] != "model-final":
accs = srsly.read_json(epoch_model / "accuracy.json")
scores = [accs.get(metric, 0.0) for metric in _get_metrics(component)]
# remove per_type dicts from score list for max() comparison
scores = [score for score in scores if isinstance(score, float)]
accuracies.append((scores, epoch_model))
if accuracies:
return max(accuracies)[1]
else:
return None
|
def _find_best(experiment_dir, component):
accuracies = []
for epoch_model in experiment_dir.iterdir():
if epoch_model.is_dir() and epoch_model.parts[-1] != "model-final":
accs = srsly.read_json(epoch_model / "accuracy.json")
scores = [accs.get(metric, 0.0) for metric in _get_metrics(component)]
accuracies.append((scores, epoch_model))
if accuracies:
return max(accuracies)[1]
else:
return None
|
https://github.com/explosion/spaCy/issues/5200
|
root@1c02094b4938:/project# spacy train \
--base-model en_core_oi_lg/en_core_oi_lg-0.0.3 \
--pipeline ner \
--n-iter 30 \
--n-early-stopping 5 \
en \
models \
data/ner/train \
data/ner/eval
✔ Created output directory: models
Training pipeline: ['ner']
Starting with base model 'en_core_oi_lg/en_core_oi_lg-0.0.3'
Extending component from base model 'ner'
Counting training words (limit=0)
Itn NER Loss NER P NER R NER F Token % CPU WPS
--- --------- ------ ------ ------ ------- -------
1 4857.485 67.342 58.462 62.588 92.721 17608
2 2574.152 74.643 68.901 71.657 92.721 19069
3 1822.187 77.081 74.286 75.658 92.721 19331
…
15 246.849 80.291 78.791 79.534 92.721 19075
16 242.276 80.518 78.571 79.533 92.721 18501
17 217.591 79.911 78.681 79.291 92.721 18839
Early stopping, best iteration is: 11
Best score = 79.919691339363; Final iteration score = 79.29442811710254
✔ Saved model to output directory
models/model-final
⠙ Creating best model...
Traceback (most recent call last):
File "/usr/local/lib/python3.8/runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/site-packages/spacy/__main__.py", line 33, in <module>
plac.call(commands[command], sys.argv[1:])
File "/usr/local/lib/python3.8/site-packages/plac_core.py", line 367, in call
cmd, result = parser.consume(arglist)
File "/usr/local/lib/python3.8/site-packages/plac_core.py", line 232, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 583, in train
best_model_path = _collate_best_model(final_meta, output_path, best_pipes)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 643, in _collate_best_model
bests[component] = _find_best(output_path, component)
File "/usr/local/lib/python3.8/site-packages/spacy/cli/train.py", line 666, in _find_best
return max(accuracies)[1]
TypeError: '>' not supported between instances of 'dict' and 'dict'
|
TypeError
|
def to_bytes(self):
"""Serialize the DocBin's annotations to a bytestring.
RETURNS (bytes): The serialized DocBin.
DOCS: https://spacy.io/api/docbin#to_bytes
"""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # this should never happen
lengths = [len(tokens) for tokens in self.tokens]
tokens = numpy.vstack(self.tokens) if self.tokens else numpy.asarray([])
spaces = numpy.vstack(self.spaces) if self.spaces else numpy.asarray([])
msg = {
"attrs": self.attrs,
"tokens": tokens.tobytes("C"),
"spaces": spaces.tobytes("C"),
"lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(self.strings),
"cats": self.cats,
}
if self.store_user_data:
msg["user_data"] = self.user_data
return zlib.compress(srsly.msgpack_dumps(msg))
|
def to_bytes(self):
"""Serialize the DocBin's annotations to a bytestring.
RETURNS (bytes): The serialized DocBin.
DOCS: https://spacy.io/api/docbin#to_bytes
"""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # this should never happen
lengths = [len(tokens) for tokens in self.tokens]
msg = {
"attrs": self.attrs,
"tokens": numpy.vstack(self.tokens).tobytes("C"),
"spaces": numpy.vstack(self.spaces).tobytes("C"),
"lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(self.strings),
"cats": self.cats,
}
if self.store_user_data:
msg["user_data"] = self.user_data
return zlib.compress(srsly.msgpack_dumps(msg))
|
https://github.com/explosion/spaCy/issues/5141
|
ValueError Traceback (most recent call last)
<ipython-input-6-d51ca7c2f6fe> in <module>
----> 1 doc_bin_bytes = doc_bin.to_bytes()
~/anaconda3/envs/insights/lib/python3.7/site-packages/spacy/tokens/_serialize.py in to_bytes(self)
141 msg = {
142 "attrs": self.attrs,
--> 143 "tokens": numpy.vstack(self.tokens).tobytes("C"),
144 "spaces": numpy.vstack(self.spaces).tobytes("C"),
145 "lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
<__array_function__ internals> in vstack(*args, **kwargs)
~/anaconda3/envs/insights/lib/python3.7/site-packages/numpy/core/shape_base.py in vstack(tup)
281 if not isinstance(arrs, list):
282 arrs = [arrs]
--> 283 return _nx.concatenate(arrs, 0)
284
285
<__array_function__ internals> in concatenate(*args, **kwargs)
ValueError: need at least one array to concatenate
|
ValueError
|
def main(kb_path, vocab_path=None, output_dir=None, n_iter=50):
"""Create a blank model with the specified vocab, set up the pipeline and train the entity linker.
The `vocab` should be the one used during creation of the KB."""
vocab = Vocab().from_disk(vocab_path)
# create blank Language class with correct vocab
nlp = spacy.blank("en", vocab=vocab)
nlp.vocab.vectors.name = "spacy_pretrained_vectors"
print("Created blank 'en' model with vocab from '%s'" % vocab_path)
# Add a sentencizer component. Alternatively, add a dependency parser for higher accuracy.
nlp.add_pipe(nlp.create_pipe("sentencizer"))
# Add a custom component to recognize "Russ Cochran" as an entity for the example training data.
# Note that in a realistic application, an actual NER algorithm should be used instead.
ruler = EntityRuler(nlp)
patterns = [
{"label": "PERSON", "pattern": [{"LOWER": "russ"}, {"LOWER": "cochran"}]}
]
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
# Create the Entity Linker component and add it to the pipeline.
if "entity_linker" not in nlp.pipe_names:
# use only the predicted EL score and not the prior probability (for demo purposes)
cfg = {"incl_prior": False}
entity_linker = nlp.create_pipe("entity_linker", cfg)
kb = KnowledgeBase(vocab=nlp.vocab)
kb.load_bulk(kb_path)
print("Loaded Knowledge Base from '%s'" % kb_path)
entity_linker.set_kb(kb)
nlp.add_pipe(entity_linker, last=True)
# Convert the texts to docs to make sure we have doc.ents set for the training examples.
# Also ensure that the annotated examples correspond to known identifiers in the knowlege base.
kb_ids = nlp.get_pipe("entity_linker").kb.get_entity_strings()
TRAIN_DOCS = []
for text, annotation in TRAIN_DATA:
with nlp.disable_pipes("entity_linker"):
doc = nlp(text)
annotation_clean = annotation
for offset, kb_id_dict in annotation["links"].items():
new_dict = {}
for kb_id, value in kb_id_dict.items():
if kb_id in kb_ids:
new_dict[kb_id] = value
else:
print(
"Removed", kb_id, "from training because it is not in the KB."
)
annotation_clean["links"][offset] = new_dict
TRAIN_DOCS.append((doc, annotation_clean))
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "entity_linker"]
with nlp.disable_pipes(*other_pipes): # only train entity linker
# reset and initialize the weights randomly
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DOCS)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DOCS, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.2, # dropout - make it harder to memorise data
losses=losses,
sgd=optimizer,
)
print(itn, "Losses", losses)
# test the trained model
_apply_model(nlp)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print()
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
_apply_model(nlp2)
|
def main(kb_path, vocab_path=None, output_dir=None, n_iter=50):
"""Create a blank model with the specified vocab, set up the pipeline and train the entity linker.
The `vocab` should be the one used during creation of the KB."""
vocab = Vocab().from_disk(vocab_path)
# create blank Language class with correct vocab
nlp = spacy.blank("en", vocab=vocab)
nlp.vocab.vectors.name = "spacy_pretrained_vectors"
print("Created blank 'en' model with vocab from '%s'" % vocab_path)
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "entity_linker" not in nlp.pipe_names:
entity_linker = nlp.create_pipe("entity_linker")
kb = KnowledgeBase(vocab=nlp.vocab)
kb.load_bulk(kb_path)
print("Loaded Knowledge Base from '%s'" % kb_path)
entity_linker.set_kb(kb)
nlp.add_pipe(entity_linker, last=True)
else:
entity_linker = nlp.get_pipe("entity_linker")
kb = entity_linker.kb
# make sure the annotated examples correspond to known identifiers in the knowlege base
kb_ids = kb.get_entity_strings()
for text, annotation in TRAIN_DATA:
for offset, kb_id_dict in annotation["links"].items():
new_dict = {}
for kb_id, value in kb_id_dict.items():
if kb_id in kb_ids:
new_dict[kb_id] = value
else:
print(
"Removed", kb_id, "from training because it is not in the KB."
)
annotation["links"][offset] = new_dict
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "entity_linker"]
with nlp.disable_pipes(*other_pipes): # only train entity linker
# reset and initialize the weights randomly
optimizer = nlp.begin_training()
for itn in range(n_iter):
random.shuffle(TRAIN_DATA)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(
texts, # batch of texts
annotations, # batch of annotations
drop=0.2, # dropout - make it harder to memorise data
losses=losses,
sgd=optimizer,
)
print(itn, "Losses", losses)
# test the trained model
_apply_model(nlp)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print()
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
_apply_model(nlp2)
|
https://github.com/explosion/spaCy/issues/4723
|
Created blank 'en' model with vocab from 'tmp/vocab'
Loaded Knowledge Base from 'tmp/kb'
('Russ Cochran his reprints include EC Comics.', 'Russ Cochran captured his first major title with his son as caddie.', 'Russ Cochran has been publishing comic art.', "Russ Cochran was a member of University of Kentucky's golf team.") ({'links': {(0, 12): {'Q7381115': 1.0, 'Q2146908': 0.0}}}, {'links': {(0, 12): {'Q7381115': 0.0, 'Q2146908': 1.0}}}, {'links': {(0, 12): {'Q7381115': 1.0, 'Q2146908': 0.0}}}, {'links': {(0, 12): {'Q7381115': 0.0, 'Q2146908': 1.0}}})
Traceback (most recent call last):
File "train_entity_linker.py", line 167, in <module>
plac.call(main)
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/plac_core.py", line 367, in call
cmd, result = parser.consume(arglist)
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/plac_core.py", line 232, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "train_entity_linker.py", line 127, in main
sgd=optimizer,
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/spacy/language.py", line 515, in update
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
File "pipes.pyx", line 1219, in spacy.pipeline.pipes.EntityLinker.update
KeyError: (0, 12)
|
KeyError
|
def _apply_model(nlp):
for text, annotation in TRAIN_DATA:
# apply the entity linker which will now make predictions for the 'Russ Cochran' entities
doc = nlp(text)
print()
print("Entities", [(ent.text, ent.label_, ent.kb_id_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_kb_id_) for t in doc])
|
def _apply_model(nlp):
for text, annotation in TRAIN_DATA:
doc = nlp.tokenizer(text)
# set entities so the evaluation is independent of the NER step
# all the examples contain 'Russ Cochran' as the first two tokens in the sentence
rc_ent = Span(doc, 0, 2, label=PERSON)
doc.ents = [rc_ent]
# apply the entity linker which will now make predictions for the 'Russ Cochran' entities
doc = nlp.get_pipe("entity_linker")(doc)
print()
print("Entities", [(ent.text, ent.label_, ent.kb_id_) for ent in doc.ents])
print("Tokens", [(t.text, t.ent_type_, t.ent_kb_id_) for t in doc])
|
https://github.com/explosion/spaCy/issues/4723
|
Created blank 'en' model with vocab from 'tmp/vocab'
Loaded Knowledge Base from 'tmp/kb'
('Russ Cochran his reprints include EC Comics.', 'Russ Cochran captured his first major title with his son as caddie.', 'Russ Cochran has been publishing comic art.', "Russ Cochran was a member of University of Kentucky's golf team.") ({'links': {(0, 12): {'Q7381115': 1.0, 'Q2146908': 0.0}}}, {'links': {(0, 12): {'Q7381115': 0.0, 'Q2146908': 1.0}}}, {'links': {(0, 12): {'Q7381115': 1.0, 'Q2146908': 0.0}}}, {'links': {(0, 12): {'Q7381115': 0.0, 'Q2146908': 1.0}}})
Traceback (most recent call last):
File "train_entity_linker.py", line 167, in <module>
plac.call(main)
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/plac_core.py", line 367, in call
cmd, result = parser.consume(arglist)
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/plac_core.py", line 232, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "train_entity_linker.py", line 127, in main
sgd=optimizer,
File "/Users/johngiorgi/miniconda3/envs/el/lib/python3.7/site-packages/spacy/language.py", line 515, in update
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
File "pipes.pyx", line 1219, in spacy.pipeline.pipes.EntityLinker.update
KeyError: (0, 12)
|
KeyError
|
def _collate_best_model(meta, output_path, components):
bests = {}
for component in components:
bests[component] = _find_best(output_path, component)
best_dest = output_path / "model-best"
shutil.copytree(path2str(output_path / "model-final"), path2str(best_dest))
for component, best_component_src in bests.items():
shutil.rmtree(path2str(best_dest / component))
shutil.copytree(
path2str(best_component_src / component), path2str(best_dest / component)
)
accs = srsly.read_json(best_component_src / "accuracy.json")
for metric in _get_metrics(component):
meta["accuracy"][metric] = accs[metric]
srsly.write_json(best_dest / "meta.json", meta)
return best_dest
|
def _collate_best_model(meta, output_path, components):
bests = {}
for component in components:
bests[component] = _find_best(output_path, component)
best_dest = output_path / "model-best"
shutil.copytree(output_path / "model-final", best_dest)
for component, best_component_src in bests.items():
shutil.rmtree(best_dest / component)
shutil.copytree(best_component_src / component, best_dest / component)
accs = srsly.read_json(best_component_src / "accuracy.json")
for metric in _get_metrics(component):
meta["accuracy"][metric] = accs[metric]
srsly.write_json(best_dest / "meta.json", meta)
return best_dest
|
https://github.com/explosion/spaCy/issues/3713
|
File "[...]/lib/python3.5/site-packages/spacy/cli/train.py", line 382, in _collate_best_model
shutil.copytree(output_path / "model-final", best_dest)
File "[...]/lib/python3.5/shutil.py", line 309, in copytree
names = os.listdir(src)
TypeError: argument should be string, bytes or integer, not PosixPath
Exception ignored in: <bound method GoldCorpus.__del__ of <spacy.gold.GoldCorpus object at 0x7f6e53d262b0>>
Traceback (most recent call last):
File "gold.pyx", line 116, in spacy.gold.GoldCorpus.__del__
File "[...]/lib/python3.5/shutil.py", line 471, in rmtree
onerror(os.lstat, path, sys.exc_info())
File "[...]/lib/python3.5/shutil.py", line 469, in rmtree
orig_st = os.lstat(path)
TypeError: lstat: illegal type for path parameter
|
TypeError
|
def build_bow_text_classifier(
nr_class, ngram_size=1, exclusive_classes=False, no_output_layer=False, **cfg
):
with Model.define_operators({">>": chain}):
model = with_cpu(
Model.ops, extract_ngrams(ngram_size, attr=ORTH) >> LinearModel(nr_class)
)
if not no_output_layer:
model = model >> (cpu_softmax if exclusive_classes else logistic)
model.nO = nr_class
return model
|
def build_bow_text_classifier(
nr_class, ngram_size=1, exclusive_classes=False, no_output_layer=False, **cfg
):
with Model.define_operators({">>": chain}):
model = extract_ngrams(ngram_size, attr=ORTH) >> with_cpu(
Model.ops, LinearModel(nr_class)
)
if not no_output_layer:
model = model >> (cpu_softmax if exclusive_classes else logistic)
model.nO = nr_class
return model
|
https://github.com/explosion/spaCy/issues/3473
|
Traceback (most recent call last):
...
nlp.update(texts, annotations, sgd=optimizer, drop=0.5, losses=losses)
File "venv/lib/python3.7/site-packages/spacy/language.py", line 452, in update
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
File "pipes.pyx", line 931, in spacy.pipeline.pipes.TextCategorizer.update
File "venv/lib/python3.7/site-packages/thinc/neural/_classes/feed_forward.py", line 46, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "venv/lib/python3.7/site-packages/thinc/api.py", line 132, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "venv/lib/python3.7/site-packages/thinc/api.py", line 132, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "venv/lib/python3.7/site-packages/thinc/api.py", line 225, in wrap
output = func(*args, **kwargs)
File "venv/lib/python3.7/site-packages/thinc/neural/_classes/feed_forward.py", line 46, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "venv/lib/python3.7/site-packages/spacy/_ml.py", line 138, in begin_update
keys = self.ops.xp.concatenate(ngrams)
File "venv/lib/python3.7/site-packages/cupy/manipulation/join.py", line 49, in concatenate
return core.concatenate_method(tup, axis)
File "cupy/core/_routines_manipulation.pyx", line 561, in cupy.core._routines_manipulation.concatenate_method
File "cupy/core/_routines_manipulation.pyx", line 574, in cupy.core._routines_manipulation.concatenate_method
TypeError: Only cupy arrays can be concatenated
|
TypeError
|
def POS_tree(root, light=False, flat=False):
"""Helper: generate a POS tree for a root token. The doc must have
`merge_ents(doc)` ran on it.
"""
subtree = format_POS(root, light=light, flat=flat)
if not flat:
for c in root.children:
subtree["modifiers"].append(POS_tree(c))
return subtree
|
def POS_tree(root, light=False, flat=False):
"""Helper: generate a POS tree for a root token. The doc must have
`merge_ents(doc)` ran on it.
"""
subtree = format_POS(root, light=light, flat=flat)
for c in root.children:
subtree["modifiers"].append(POS_tree(c))
return subtree
|
https://github.com/explosion/spaCy/issues/3150
|
$ python
Python 3.7.2 (default, Jan 3 2019, 02:55:40)
[GCC 8.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
import spacy
nlp = spacy.load('en_core_web_sm')
doc = nlp('Alice ate the pizza')
doc.print_tree(flat=True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "doc.pyx", line 983, in spacy.tokens.doc.Doc.print_tree
File "/venv/lib/python3.7/site-packages/spacy/tokens/printers.py", line 74, in parse_tree
for sent in doc_clone.sents]
File "/venv/lib/python3.7/site-packages/spacy/tokens/printers.py", line 74, in <listcomp>
for sent in doc_clone.sents]
File "/venv/lib/python3.7/site-packages/spacy/tokens/printers.py", line 41, in POS_tree
subtree["modifiers"].append(POS_tree(c))
KeyError: 'modifiers'
|
KeyError
|
def generate_meta(model_path, existing_meta, msg):
meta = existing_meta or {}
settings = [
("lang", "Model language", meta.get("lang", "en")),
("name", "Model name", meta.get("name", "model")),
("version", "Model version", meta.get("version", "0.0.0")),
("spacy_version", "Required spaCy version", ">=%s,<3.0.0" % about.__version__),
("description", "Model description", meta.get("description", False)),
("author", "Author", meta.get("author", False)),
("email", "Author email", meta.get("email", False)),
("url", "Author website", meta.get("url", False)),
("license", "License", meta.get("license", "CC BY-SA 3.0")),
]
nlp = util.load_model_from_path(Path(model_path))
meta["pipeline"] = nlp.pipe_names
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
"name": nlp.vocab.vectors.name,
}
msg.divider("Generating meta.json")
msg.text(
"Enter the package settings for your model. The following information "
"will be read from your model data: pipeline, vectors."
)
for setting, desc, default in settings:
response = get_raw_input(desc, default)
meta[setting] = default if response == "" and default else response
if about.__title__ != "spacy":
meta["parent_package"] = about.__title__
return meta
|
def generate_meta(model_path, existing_meta, msg):
meta = existing_meta or {}
settings = [
("lang", "Model language", meta.get("lang", "en")),
("name", "Model name", meta.get("name", "model")),
("version", "Model version", meta.get("version", "0.0.0")),
("spacy_version", "Required spaCy version", ">=%s,<3.0.0" % about.__version__),
("description", "Model description", meta.get("description", False)),
("author", "Author", meta.get("author", False)),
("email", "Author email", meta.get("email", False)),
("url", "Author website", meta.get("url", False)),
("license", "License", meta.get("license", "CC BY-SA 3.0")),
]
nlp = util.load_model_from_path(Path(model_path))
meta["pipeline"] = nlp.pipe_names
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
}
msg.divider("Generating meta.json")
msg.text(
"Enter the package settings for your model. The following information "
"will be read from your model data: pipeline, vectors."
)
for setting, desc, default in settings:
response = get_raw_input(desc, default)
meta[setting] = default if response == "" and default else response
if about.__title__ != "spacy":
meta["parent_package"] = about.__title__
return meta
|
https://github.com/explosion/spaCy/issues/3093
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/__init__.py", line 21, in load
return util.load_model(name, **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 116, in load_model
return load_model_from_path(Path(name), **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 156, in load_model_from_path
return nlp.from_disk(model_path)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/language.py", line 647, in from_disk
util.from_disk(path, deserializers, exclude)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 511, in from_disk
reader(path / key)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/language.py", line 643, in <lambda>
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
File "pipeline.pyx", line 643, in spacy.pipeline.Tagger.from_disk
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 511, in from_disk
reader(path / key)
File "pipeline.pyx", line 625, in spacy.pipeline.Tagger.from_disk.load_model
File "pipeline.pyx", line 535, in spacy.pipeline.Tagger.Model
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/_ml.py", line 447, in build_tagger_model
pretrained_vectors=pretrained_vectors)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/_ml.py", line 278, in Tok2Vec
glove = StaticVectors(pretrained_vectors, width, column=cols.index(ID))
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 41, in __init__
vectors = self.get_vectors()
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 52, in get_vectors
return get_vectors(self.ops, self.lang)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/extra/load_nlp.py", line 19, in get_vectors
nlp = get_spacy(lang)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/extra/load_nlp.py", line 11, in get_spacy
SPACY_MODELS[lang] = spacy.load(lang, **kwargs)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/__init__.py", line 21, in load
return util.load_model(name, **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 119, in load_model
raise IOError(Errors.E050.format(name=name))
OSError: [E050] Can't find model 'nb_model.vectors'. It doesn't seem to be a shortcut link, a Python package or a valid path to a data directory.
|
OSError
|
def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
vectors=None,
n_iter=30,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None,
init_tok2vec=None,
parser_multitasks="",
entity_multitasks="",
noise_level=0.0,
gold_preproc=False,
learn_tokens=False,
verbose=False,
debug=False,
):
"""
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
"""
msg = Printer()
util.fix_random_seed()
util.set_env_log(verbose)
# Make sure all files and paths exists if they are needed
train_path = util.ensure_path(train_path)
dev_path = util.ensure_path(dev_path)
meta_path = util.ensure_path(meta_path)
if raw_text is not None:
raw_text = list(srsly.read_jsonl(raw_text))
if not train_path or not train_path.exists():
msg.fail("Training data not found", train_path, exits=1)
if not dev_path or not dev_path.exists():
msg.fail("Development data not found", dev_path, exits=1)
if meta_path is not None and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path) if meta_path else {}
if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:
msg.warn(
"Output directory is not empty",
"This can lead to unintended side effects when saving the model. "
"Please use an empty directory or a different path instead. If "
"the specified output path doesn't exist, the directory will be "
"created for you.",
)
if not output_path.exists():
output_path.mkdir()
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 100.0),
util.env_opt("batch_to", 1000.0),
util.env_opt("batch_compound", 1.001),
)
# Set up the base model and pipeline. If a base model is specified, load
# the model and make sure the pipeline matches the pipeline setting. If
# training starts from a blank model, intitalize the language class.
pipeline = [p.strip() for p in pipeline.split(",")]
msg.text("Training pipeline: {}".format(pipeline))
if base_model:
msg.text("Starting with base model '{}'".format(base_model))
nlp = util.load_model(base_model)
if nlp.lang != lang:
msg.fail(
"Model language ('{}') doesn't match language specified as "
"`lang` argument ('{}') ".format(nlp.lang, lang),
exits=1,
)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipeline]
nlp.disable_pipes(*other_pipes)
for pipe in pipeline:
if pipe not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe(pipe))
else:
msg.text("Starting with blank model '{}'".format(lang))
lang_cls = util.get_lang_class(lang)
nlp = lang_cls()
for pipe in pipeline:
nlp.add_pipe(nlp.create_pipe(pipe))
if learn_tokens:
nlp.add_pipe(nlp.create_pipe("merge_subtokens"))
if vectors:
msg.text("Loading vector from model '{}'".format(vectors))
_load_vectors(nlp, vectors)
# Multitask objectives
multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)]
for pipe_name, multitasks in multitask_options:
if multitasks:
if pipe_name not in pipeline:
msg.fail(
"Can't use multitask objective without '{}' in the pipeline".format(
pipe_name
)
)
pipe = nlp.get_pipe(pipe_name)
for objective in multitasks.split(","):
pipe.add_multitask_objective(objective)
# Prepare training corpus
msg.text("Counting training words (limit={})".format(n_examples))
corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
n_train_words = corpus.count_train()
if base_model:
# Start with an existing model, use default optimizer
optimizer = create_default_optimizer(Model.ops)
else:
# Start with a blank model, call begin_training
optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
nlp._optimizer = None
# Load in pre-trained weights
if init_tok2vec is not None:
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
msg.text("Loaded pretrained tok2vec for: {}".format(components))
# fmt: off
row_head = ("Itn", "Dep Loss", "NER Loss", "UAS", "NER P", "NER R", "NER F", "Tag %", "Token %", "CPU WPS", "GPU WPS")
row_settings = {
"widths": (3, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7),
"aligns": tuple(["r" for i in row_head]),
"spacing": 2
}
# fmt: on
print("")
msg.row(row_head, **row_settings)
msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
try:
for i in range(n_iter):
train_docs = corpus.train_docs(
nlp, noise_level=noise_level, gold_preproc=gold_preproc, max_length=0
)
if raw_text:
random.shuffle(raw_text)
raw_batches = util.minibatch(
(nlp.make_doc(rt["text"]) for rt in raw_text), size=8
)
words_seen = 0
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in util.minibatch_by_words(train_docs, size=batch_sizes):
if not batch:
continue
docs, golds = zip(*batch)
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
if not int(os.environ.get("LOG_FRIENDLY", 0)):
pbar.update(sum(len(doc) for doc in docs))
words_seen += sum(len(doc) for doc in docs)
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc))
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, debug)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
srsly.write_json(acc_loc, scorer.scores)
# Update model meta.json
meta["lang"] = nlp.lang
meta["pipeline"] = nlp.pipe_names
meta["spacy_version"] = ">=%s" % about.__version__
meta["accuracy"] = scorer.scores
meta["speed"] = {"nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
"name": nlp.vocab.vectors.name,
}
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
meta_loc = output_path / ("model%d" % i) / "meta.json"
srsly.write_json(meta_loc, meta)
util.set_env_log(verbose)
progress = _get_progress(
i, losses, scorer.scores, cpu_wps=cpu_wps, gpu_wps=gpu_wps
)
msg.row(progress, **row_settings)
finally:
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
msg.good("Saved model to output directory", final_model_path)
with msg.loading("Creating best model..."):
best_model_path = _collate_best_model(meta, output_path, nlp.pipe_names)
msg.good("Created best model", best_model_path)
|
def train(
lang,
output_path,
train_path,
dev_path,
raw_text=None,
base_model=None,
pipeline="tagger,parser,ner",
vectors=None,
n_iter=30,
n_examples=0,
use_gpu=-1,
version="0.0.0",
meta_path=None,
init_tok2vec=None,
parser_multitasks="",
entity_multitasks="",
noise_level=0.0,
gold_preproc=False,
learn_tokens=False,
verbose=False,
debug=False,
):
"""
Train or update a spaCy model. Requires data to be formatted in spaCy's
JSON format. To convert data from other formats, use the `spacy convert`
command.
"""
msg = Printer()
util.fix_random_seed()
util.set_env_log(verbose)
# Make sure all files and paths exists if they are needed
train_path = util.ensure_path(train_path)
dev_path = util.ensure_path(dev_path)
meta_path = util.ensure_path(meta_path)
if raw_text is not None:
raw_text = list(srsly.read_jsonl(raw_text))
if not train_path or not train_path.exists():
msg.fail("Training data not found", train_path, exits=1)
if not dev_path or not dev_path.exists():
msg.fail("Development data not found", dev_path, exits=1)
if meta_path is not None and not meta_path.exists():
msg.fail("Can't find model meta.json", meta_path, exits=1)
meta = srsly.read_json(meta_path) if meta_path else {}
if output_path.exists() and [p for p in output_path.iterdir() if p.is_dir()]:
msg.warn(
"Output directory is not empty",
"This can lead to unintended side effects when saving the model. "
"Please use an empty directory or a different path instead. If "
"the specified output path doesn't exist, the directory will be "
"created for you.",
)
if not output_path.exists():
output_path.mkdir()
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 100.0),
util.env_opt("batch_to", 1000.0),
util.env_opt("batch_compound", 1.001),
)
# Set up the base model and pipeline. If a base model is specified, load
# the model and make sure the pipeline matches the pipeline setting. If
# training starts from a blank model, intitalize the language class.
pipeline = [p.strip() for p in pipeline.split(",")]
msg.text("Training pipeline: {}".format(pipeline))
if base_model:
msg.text("Starting with base model '{}'".format(base_model))
nlp = util.load_model(base_model)
if nlp.lang != lang:
msg.fail(
"Model language ('{}') doesn't match language specified as "
"`lang` argument ('{}') ".format(nlp.lang, lang),
exits=1,
)
other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipeline]
nlp.disable_pipes(*other_pipes)
for pipe in pipeline:
if pipe not in nlp.pipe_names:
nlp.add_pipe(nlp.create_pipe(pipe))
else:
msg.text("Starting with blank model '{}'".format(lang))
lang_cls = util.get_lang_class(lang)
nlp = lang_cls()
for pipe in pipeline:
nlp.add_pipe(nlp.create_pipe(pipe))
if learn_tokens:
nlp.add_pipe(nlp.create_pipe("merge_subtokens"))
if vectors:
msg.text("Loading vector from model '{}'".format(vectors))
_load_vectors(nlp, vectors)
# Multitask objectives
multitask_options = [("parser", parser_multitasks), ("ner", entity_multitasks)]
for pipe_name, multitasks in multitask_options:
if multitasks:
if pipe_name not in pipeline:
msg.fail(
"Can't use multitask objective without '{}' in the pipeline".format(
pipe_name
)
)
pipe = nlp.get_pipe(pipe_name)
for objective in multitasks.split(","):
pipe.add_multitask_objective(objective)
# Prepare training corpus
msg.text("Counting training words (limit={})".format(n_examples))
corpus = GoldCorpus(train_path, dev_path, limit=n_examples)
n_train_words = corpus.count_train()
if base_model:
# Start with an existing model, use default optimizer
optimizer = create_default_optimizer(Model.ops)
else:
# Start with a blank model, call begin_training
optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
nlp._optimizer = None
# Load in pre-trained weights
if init_tok2vec is not None:
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
msg.text("Loaded pretrained tok2vec for: {}".format(components))
# fmt: off
row_head = ("Itn", "Dep Loss", "NER Loss", "UAS", "NER P", "NER R", "NER F", "Tag %", "Token %", "CPU WPS", "GPU WPS")
row_settings = {
"widths": (3, 10, 10, 7, 7, 7, 7, 7, 7, 7, 7),
"aligns": tuple(["r" for i in row_head]),
"spacing": 2
}
# fmt: on
print("")
msg.row(row_head, **row_settings)
msg.row(["-" * width for width in row_settings["widths"]], **row_settings)
try:
for i in range(n_iter):
train_docs = corpus.train_docs(
nlp, noise_level=noise_level, gold_preproc=gold_preproc, max_length=0
)
if raw_text:
random.shuffle(raw_text)
raw_batches = util.minibatch(
(nlp.make_doc(rt["text"]) for rt in raw_text), size=8
)
words_seen = 0
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in util.minibatch_by_words(train_docs, size=batch_sizes):
if not batch:
continue
docs, golds = zip(*batch)
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
if raw_text:
# If raw text is available, perform 'rehearsal' updates,
# which use unlabelled data to reduce overfitting.
raw_batch = list(next(raw_batches))
nlp.rehearse(raw_batch, sgd=optimizer, losses=losses)
if not int(os.environ.get("LOG_FRIENDLY", 0)):
pbar.update(sum(len(doc) for doc in docs))
words_seen += sum(len(doc) for doc in docs)
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc))
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs, debug)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
srsly.write_json(acc_loc, scorer.scores)
# Update model meta.json
meta["lang"] = nlp.lang
meta["pipeline"] = nlp.pipe_names
meta["spacy_version"] = ">=%s" % about.__version__
meta["accuracy"] = scorer.scores
meta["speed"] = {"nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
}
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
meta_loc = output_path / ("model%d" % i) / "meta.json"
srsly.write_json(meta_loc, meta)
util.set_env_log(verbose)
progress = _get_progress(
i, losses, scorer.scores, cpu_wps=cpu_wps, gpu_wps=gpu_wps
)
msg.row(progress, **row_settings)
finally:
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
msg.good("Saved model to output directory", final_model_path)
with msg.loading("Creating best model..."):
best_model_path = _collate_best_model(meta, output_path, nlp.pipe_names)
msg.good("Created best model", best_model_path)
|
https://github.com/explosion/spaCy/issues/3093
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/__init__.py", line 21, in load
return util.load_model(name, **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 116, in load_model
return load_model_from_path(Path(name), **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 156, in load_model_from_path
return nlp.from_disk(model_path)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/language.py", line 647, in from_disk
util.from_disk(path, deserializers, exclude)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 511, in from_disk
reader(path / key)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/language.py", line 643, in <lambda>
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
File "pipeline.pyx", line 643, in spacy.pipeline.Tagger.from_disk
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 511, in from_disk
reader(path / key)
File "pipeline.pyx", line 625, in spacy.pipeline.Tagger.from_disk.load_model
File "pipeline.pyx", line 535, in spacy.pipeline.Tagger.Model
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/_ml.py", line 447, in build_tagger_model
pretrained_vectors=pretrained_vectors)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/_ml.py", line 278, in Tok2Vec
glove = StaticVectors(pretrained_vectors, width, column=cols.index(ID))
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 41, in __init__
vectors = self.get_vectors()
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 52, in get_vectors
return get_vectors(self.ops, self.lang)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/extra/load_nlp.py", line 19, in get_vectors
nlp = get_spacy(lang)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/thinc/extra/load_nlp.py", line 11, in get_spacy
SPACY_MODELS[lang] = spacy.load(lang, **kwargs)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/__init__.py", line 21, in load
return util.load_model(name, **overrides)
File "/home/ubuntu/src/spacy-nb/.venv/lib/python3.6/site-packages/spacy/util.py", line 119, in load_model
raise IOError(Errors.E050.format(name=name))
OSError: [E050] Can't find model 'nb_model.vectors'. It doesn't seem to be a shortcut link, a Python package or a valid path to a data directory.
|
OSError
|
def __init__(self, cls, nlp=None):
self.vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
MeCab = try_mecab_import()
self.tokenizer = MeCab.Tagger()
self.tokenizer.parseToNode("") # see #2901
|
def __init__(self, cls, nlp=None):
self.vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
MeCab = try_mecab_import()
self.tokenizer = MeCab.Tagger()
|
https://github.com/explosion/spaCy/issues/2901
|
import spacy
nlp = spacy.blank('ja')
nlp('pythonが大好きです')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/spacy/language.py", line 340, in __call__
doc = self.make_doc(text)
File "/usr/local/lib/python3.6/site-packages/spacy/lang/ja/__init__.py", line 117, in make_doc
return self.tokenizer(text)
File "/usr/local/lib/python3.6/site-packages/spacy/lang/ja/__init__.py", line 81, in __call__
doc = Doc(self.vocab, words=words, spaces=[False]*len(words))
File "doc.pyx", line 176, in spacy.tokens.doc.Doc.__init__
File "doc.pyx", line 559, in spacy.tokens.doc.Doc.push_back
ValueError: [E031] Invalid token: empty string ('') at position 0.
nlp('pythonが大好きです')
pythonが大好きです
|
ValueError
|
def main(model=None, output_dir=None, n_iter=20, n_texts=2000):
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# add the text classifier to the pipeline if it doesn't exist
# nlp.create_pipe works for built-ins that are registered with spaCy
if "textcat" not in nlp.pipe_names:
textcat = nlp.create_pipe("textcat")
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe("textcat")
# add label to text classifier
textcat.add_label("POSITIVE")
textcat.add_label("NEGATIVE")
# load the IMDB dataset
print("Loading IMDB data...")
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)
print(
"Using {} examples ({} training, {} evaluation)".format(
n_texts, len(train_texts), len(dev_texts)
)
)
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
with nlp.disable_pipes(*other_pipes): # only train textcat
optimizer = nlp.begin_training()
print("Training the model...")
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
for i in range(n_iter):
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(train_data, size=compounding(4.0, 16.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)
with textcat.model.use_params(optimizer.averages):
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
print(
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
losses["textcat"],
scores["textcat_p"],
scores["textcat_r"],
scores["textcat_f"],
)
)
# test the trained model
test_text = "This movie sucked"
doc = nlp(test_text)
print(test_text, doc.cats)
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc2 = nlp2(test_text)
print(test_text, doc2.cats)
|
def main(model=None, output_dir=None, n_iter=20, n_texts=2000):
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank("en") # create blank Language class
print("Created blank 'en' model")
# add the text classifier to the pipeline if it doesn't exist
# nlp.create_pipe works for built-ins that are registered with spaCy
if "textcat" not in nlp.pipe_names:
textcat = nlp.create_pipe("textcat")
nlp.add_pipe(textcat, last=True)
# otherwise, get it, so we can add labels to it
else:
textcat = nlp.get_pipe("textcat")
# add label to text classifier
textcat.add_label("POSITIVE")
# load the IMDB dataset
print("Loading IMDB data...")
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)
print(
"Using {} examples ({} training, {} evaluation)".format(
n_texts, len(train_texts), len(dev_texts)
)
)
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
with nlp.disable_pipes(*other_pipes): # only train textcat
optimizer = nlp.begin_training()
print("Training the model...")
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
for i in range(n_iter):
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)
with textcat.model.use_params(optimizer.averages):
# evaluate on the dev data split off in load_data()
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
print(
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
losses["textcat"],
scores["textcat_p"],
scores["textcat_r"],
scores["textcat_f"],
)
)
# test the trained model
test_text = "This movie sucked"
doc = nlp(test_text)
print(test_text, doc.cats)
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc2 = nlp2(test_text)
print(test_text, doc2.cats)
|
https://github.com/explosion/spaCy/issues/1798
|
$ python scripts/train_textcat.py
Created blank 'en' model
Loading IMDB data...
Using 2000 examples (1600 training, 400 evaluation)
Training the model...
LOSS P R F
Traceback (most recent call last):
File "scripts/train_textcat.py", line 133, in <module>
plac.call(main)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "scripts/train_textcat.py", line 68, in main
losses=losses)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/language.py", line 407, in update
proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
File "pipeline.pyx", line 817, in spacy.pipeline.TextCategorizer.update
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/_ml.py", line 101, in _preprocess_doc
keys = ops.xp.concatenate(keys)
File "/home/motoki/aes/lib/python3.6/site-packages/cupy/manipulation/join.py", line 49, in concatenate
return core.concatenate_method(tup, axis)
File "cupy/core/core.pyx", line 2410, in cupy.core.core.concatenate_method
File "cupy/core/core.pyx", line 2422, in cupy.core.core.concatenate_method
TypeError: Only cupy arrays can be concatenated
|
TypeError
|
def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
split = int(len(train_data) * split)
return (texts[:split], cats[:split]), (texts[split:], cats[split:])
|
def load_data(limit=0, split=0.8):
"""Load data from the IMDB dataset."""
# Partition off part of the train data for evaluation
train_data, _ = thinc.extra.datasets.imdb()
random.shuffle(train_data)
train_data = train_data[-limit:]
texts, labels = zip(*train_data)
cats = [{"POSITIVE": bool(y)} for y in labels]
split = int(len(train_data) * split)
return (texts[:split], cats[:split]), (texts[split:], cats[split:])
|
https://github.com/explosion/spaCy/issues/1798
|
$ python scripts/train_textcat.py
Created blank 'en' model
Loading IMDB data...
Using 2000 examples (1600 training, 400 evaluation)
Training the model...
LOSS P R F
Traceback (most recent call last):
File "scripts/train_textcat.py", line 133, in <module>
plac.call(main)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "scripts/train_textcat.py", line 68, in main
losses=losses)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/language.py", line 407, in update
proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
File "pipeline.pyx", line 817, in spacy.pipeline.TextCategorizer.update
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/_ml.py", line 101, in _preprocess_doc
keys = ops.xp.concatenate(keys)
File "/home/motoki/aes/lib/python3.6/site-packages/cupy/manipulation/join.py", line 49, in concatenate
return core.concatenate_method(tup, axis)
File "cupy/core/core.pyx", line 2410, in cupy.core.core.concatenate_method
File "cupy/core/core.pyx", line 2422, in cupy.core.core.concatenate_method
TypeError: Only cupy arrays can be concatenated
|
TypeError
|
def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
tp = 0.0 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 0.0 # True negatives
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if label == "NEGATIVE":
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.0
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.0
elif score < 0.5 and gold[label] < 0.5:
tn += 1
elif score < 0.5 and gold[label] >= 0.5:
fn += 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_score = 2 * (precision * recall) / (precision + recall)
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
|
def evaluate(tokenizer, textcat, texts, cats):
docs = (tokenizer(text) for text in texts)
tp = 1e-8 # True positives
fp = 1e-8 # False positives
fn = 1e-8 # False negatives
tn = 1e-8 # True negatives
for i, doc in enumerate(textcat.pipe(docs)):
gold = cats[i]
for label, score in doc.cats.items():
if label not in gold:
continue
if score >= 0.5 and gold[label] >= 0.5:
tp += 1.0
elif score >= 0.5 and gold[label] < 0.5:
fp += 1.0
elif score < 0.5 and gold[label] < 0.5:
tn += 1
elif score < 0.5 and gold[label] >= 0.5:
fn += 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_score = 2 * (precision * recall) / (precision + recall)
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
|
https://github.com/explosion/spaCy/issues/1798
|
$ python scripts/train_textcat.py
Created blank 'en' model
Loading IMDB data...
Using 2000 examples (1600 training, 400 evaluation)
Training the model...
LOSS P R F
Traceback (most recent call last):
File "scripts/train_textcat.py", line 133, in <module>
plac.call(main)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "/home/motoki/aes/lib/python3.6/site-packages/plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "scripts/train_textcat.py", line 68, in main
losses=losses)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/language.py", line 407, in update
proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
File "pipeline.pyx", line 817, in spacy.pipeline.TextCategorizer.update
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/home/motoki/aes/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/home/motoki/aes/lib/python3.6/site-packages/spacy/_ml.py", line 101, in _preprocess_doc
keys = ops.xp.concatenate(keys)
File "/home/motoki/aes/lib/python3.6/site-packages/cupy/manipulation/join.py", line 49, in concatenate
return core.concatenate_method(tup, axis)
File "cupy/core/core.pyx", line 2410, in cupy.core.core.concatenate_method
File "cupy/core/core.pyx", line 2422, in cupy.core.core.concatenate_method
TypeError: Only cupy arrays can be concatenated
|
TypeError
|
def symlink_to(orig, dest):
if is_windows:
import subprocess
subprocess.call(["mklink", "/d", path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
|
def symlink_to(orig, dest):
if is_python2 and is_windows:
import subprocess
subprocess.call(["mklink", "/d", path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
|
https://github.com/explosion/spaCy/issues/2948
|
(venv) C:\g\py\spacy> python -m spacy link en_core_web_sm en
C:\Program Files\Python37\lib\importlib\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Pool size changed, may indicate binary incompatibility. Expected 48 from C header, got 64 from PyObject
return f(*args, **kwds)
C:\Program Files\Python37\lib\importlib\_bootstrap.py:219: RuntimeWarning: cymem.cymem.Address size changed, may indicate binary incompatibility. Expected 24 from C header, got 40 from PyObject
return f(*args, **kwds)
Error: Couldn't link model to 'en'
Creating a symlink in spacy/data failed. Make sure you have the required
permissions and try re-running the command as admin, or use a
virtualenv. You can still import the model as a module and call its
load() method, or create the symlink manually.
C:\g\py\spacy\venv\lib\site-packages\en_core_web_sm -->
C:\g\py\spacy\venv\lib\site-packages\spacy\data\en
Traceback (most recent call last):
File "C:\Program Files\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Program Files\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\__main__.py", line 31, in <module>
plac.call(commands[command], sys.argv[1:])
File "C:\g\py\spacy\venv\lib\site-packages\plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "C:\g\py\spacy\venv\lib\site-packages\plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\cli\link.py", line 48, in link
symlink_to(link_path, model_path)
File "C:\g\py\spacy\venv\lib\site-packages\spacy\compat.py", line 87, in symlink_to
orig.symlink_to(dest)
File "C:\Program Files\Python37\lib\pathlib.py", line 1320, in symlink_to
self._accessor.symlink(target, self, target_is_directory)
OSError: symbolic link privilege not held
|
OSError
|
def link_vectors_to_models(vocab):
vectors = vocab.vectors
if vectors.name is None:
vectors.name = VECTORS_KEY
print(
"Warning: Unnamed vectors -- this won't allow multiple vectors "
"models to be loaded. (Shape: (%d, %d))" % vectors.data.shape
)
ops = Model.ops
for word in vocab:
if word.orth in vectors.key2row:
word.rank = vectors.key2row[word.orth]
else:
word.rank = 0
data = ops.asarray(vectors.data)
# Set an entry here, so that vectors are accessed by StaticVectors
# (unideal, I know)
thinc.extra.load_nlp.VECTORS[(ops.device, vectors.name)] = data
|
def link_vectors_to_models(vocab):
vectors = vocab.vectors
ops = Model.ops
for word in vocab:
if word.orth in vectors.key2row:
word.rank = vectors.key2row[word.orth]
else:
word.rank = 0
data = ops.asarray(vectors.data)
# Set an entry here, so that vectors are accessed by StaticVectors
# (unideal, I know)
thinc.extra.load_nlp.VECTORS[(ops.device, VECTORS_KEY)] = data
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def Tok2Vec(width, embed_size, **kwargs):
pretrained_vectors = kwargs.get("pretrained_vectors", None)
cnn_maxout_pieces = kwargs.get("cnn_maxout_pieces", 2)
cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
with Model.define_operators(
{">>": chain, "|": concatenate, "**": clone, "+": add, "*": reapply}
):
norm = HashEmbed(width, embed_size, column=cols.index(NORM), name="embed_norm")
prefix = HashEmbed(
width, embed_size // 2, column=cols.index(PREFIX), name="embed_prefix"
)
suffix = HashEmbed(
width, embed_size // 2, column=cols.index(SUFFIX), name="embed_suffix"
)
shape = HashEmbed(
width, embed_size // 2, column=cols.index(SHAPE), name="embed_shape"
)
if pretrained_vectors is not None:
glove = StaticVectors(pretrained_vectors, width, column=cols.index(ID))
embed = uniqued(
(glove | norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 5, pieces=3)),
column=cols.index(ORTH),
)
else:
embed = uniqued(
(norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 4, pieces=3)),
column=cols.index(ORTH),
)
convolution = Residual(
ExtractWindow(nW=1)
>> LN(Maxout(width, width * 3, pieces=cnn_maxout_pieces))
)
tok2vec = FeatureExtracter(cols) >> with_flatten(embed >> convolution**4, pad=4)
# Work around thinc API limitations :(. TODO: Revise in Thinc 7
tok2vec.nO = width
tok2vec.embed = embed
return tok2vec
|
def Tok2Vec(width, embed_size, **kwargs):
pretrained_dims = kwargs.get("pretrained_dims", 0)
cnn_maxout_pieces = kwargs.get("cnn_maxout_pieces", 2)
cols = [ID, NORM, PREFIX, SUFFIX, SHAPE, ORTH]
with Model.define_operators(
{">>": chain, "|": concatenate, "**": clone, "+": add, "*": reapply}
):
norm = HashEmbed(width, embed_size, column=cols.index(NORM), name="embed_norm")
prefix = HashEmbed(
width, embed_size // 2, column=cols.index(PREFIX), name="embed_prefix"
)
suffix = HashEmbed(
width, embed_size // 2, column=cols.index(SUFFIX), name="embed_suffix"
)
shape = HashEmbed(
width, embed_size // 2, column=cols.index(SHAPE), name="embed_shape"
)
if pretrained_dims is not None and pretrained_dims >= 1:
glove = StaticVectors(VECTORS_KEY, width, column=cols.index(ID))
embed = uniqued(
(glove | norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 5, pieces=3)),
column=5,
)
else:
embed = uniqued(
(norm | prefix | suffix | shape)
>> LN(Maxout(width, width * 4, pieces=3)),
column=5,
)
convolution = Residual(
ExtractWindow(nW=1)
>> LN(Maxout(width, width * 3, pieces=cnn_maxout_pieces))
)
tok2vec = FeatureExtracter(cols) >> with_flatten(embed >> convolution**4, pad=4)
# Work around thinc API limitations :(. TODO: Revise in Thinc 7
tok2vec.nO = width
tok2vec.embed = embed
return tok2vec
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def build_tagger_model(nr_class, **cfg):
embed_size = util.env_opt("embed_size", 7000)
if "token_vector_width" in cfg:
token_vector_width = cfg["token_vector_width"]
else:
token_vector_width = util.env_opt("token_vector_width", 128)
pretrained_vectors = cfg.get("pretrained_vectors")
with Model.define_operators({">>": chain, "+": add}):
if "tok2vec" in cfg:
tok2vec = cfg["tok2vec"]
else:
tok2vec = Tok2Vec(
token_vector_width, embed_size, pretrained_vectors=pretrained_vectors
)
softmax = with_flatten(Softmax(nr_class, token_vector_width))
model = tok2vec >> softmax
model.nI = None
model.tok2vec = tok2vec
model.softmax = softmax
return model
|
def build_tagger_model(nr_class, **cfg):
embed_size = util.env_opt("embed_size", 7000)
if "token_vector_width" in cfg:
token_vector_width = cfg["token_vector_width"]
else:
token_vector_width = util.env_opt("token_vector_width", 128)
pretrained_dims = cfg.get("pretrained_dims", 0)
with Model.define_operators({">>": chain, "+": add}):
if "tok2vec" in cfg:
tok2vec = cfg["tok2vec"]
else:
tok2vec = Tok2Vec(
token_vector_width, embed_size, pretrained_dims=pretrained_dims
)
softmax = with_flatten(Softmax(nr_class, token_vector_width))
model = tok2vec >> softmax
model.nI = None
model.tok2vec = tok2vec
model.softmax = softmax
return model
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def train(
lang,
output_dir,
train_data,
dev_data,
n_iter=30,
n_sents=0,
parser_multitasks="",
entity_multitasks="",
use_gpu=-1,
vectors=None,
no_tagger=False,
no_parser=False,
no_entities=False,
gold_preproc=False,
version="0.0.0",
meta_path=None,
):
"""
Train a model. Expects data in spaCy's JSON format.
"""
util.fix_random_seed()
util.set_env_log(True)
n_sents = n_sents or None
output_path = util.ensure_path(output_dir)
train_path = util.ensure_path(train_data)
dev_path = util.ensure_path(dev_data)
meta_path = util.ensure_path(meta_path)
if not output_path.exists():
output_path.mkdir()
if not train_path.exists():
prints(train_path, title="Training data not found", exits=1)
if dev_path and not dev_path.exists():
prints(dev_path, title="Development data not found", exits=1)
if meta_path is not None and not meta_path.exists():
prints(meta_path, title="meta.json not found", exits=1)
meta = util.read_json(meta_path) if meta_path else {}
if not isinstance(meta, dict):
prints(
"Expected dict but got: {}".format(type(meta)),
title="Not a valid meta.json format",
exits=1,
)
meta.setdefault("lang", lang)
meta.setdefault("name", "unnamed")
pipeline = ["tagger", "parser", "ner"]
if no_tagger and "tagger" in pipeline:
pipeline.remove("tagger")
if no_parser and "parser" in pipeline:
pipeline.remove("parser")
if no_entities and "ner" in pipeline:
pipeline.remove("ner")
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 1),
util.env_opt("batch_to", 16),
util.env_opt("batch_compound", 1.001),
)
max_doc_len = util.env_opt("max_doc_len", 5000)
corpus = GoldCorpus(train_path, dev_path, limit=n_sents)
n_train_words = corpus.count_train()
lang_class = util.get_lang_class(lang)
nlp = lang_class()
meta["pipeline"] = pipeline
nlp.meta.update(meta)
if vectors:
print("Load vectors model", vectors)
util.load_model(vectors, vocab=nlp.vocab)
for lex in nlp.vocab:
values = {}
for attr, func in nlp.vocab.lex_attr_getters.items():
# These attrs are expected to be set by data. Others should
# be set by calling the language functions.
if attr not in (CLUSTER, PROB, IS_OOV, LANG):
values[lex.vocab.strings[attr]] = func(lex.orth_)
lex.set_attrs(**values)
lex.is_oov = False
for name in pipeline:
nlp.add_pipe(nlp.create_pipe(name), name=name)
if parser_multitasks:
for objective in parser_multitasks.split(","):
nlp.parser.add_multitask_objective(objective)
if entity_multitasks:
for objective in entity_multitasks.split(","):
nlp.entity.add_multitask_objective(objective)
optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
nlp._optimizer = None
print("Itn.\tP.Loss\tN.Loss\tUAS\tNER P.\tNER R.\tNER F.\tTag %\tToken %")
try:
train_docs = corpus.train_docs(
nlp,
projectivize=True,
noise_level=0.0,
gold_preproc=gold_preproc,
max_length=0,
)
train_docs = list(train_docs)
for i in range(n_iter):
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in minibatch(train_docs, size=batch_sizes):
batch = [(d, g) for (d, g) in batch if len(d) < max_doc_len]
if not batch:
continue
docs, golds = zip(*batch)
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
pbar.update(sum(len(doc) for doc in docs))
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc))
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
with acc_loc.open("w") as file_:
file_.write(json_dumps(scorer.scores))
meta_loc = output_path / ("model%d" % i) / "meta.json"
meta["accuracy"] = scorer.scores
meta["speed"] = {"nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
}
meta["lang"] = nlp.lang
meta["pipeline"] = pipeline
meta["spacy_version"] = ">=%s" % about.__version__
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
with meta_loc.open("w") as file_:
file_.write(json_dumps(meta))
util.set_env_log(True)
print_progress(i, losses, scorer.scores, cpu_wps=cpu_wps, gpu_wps=gpu_wps)
finally:
print("Saving model...")
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
|
def train(
lang,
output_dir,
train_data,
dev_data,
n_iter=30,
n_sents=0,
parser_multitasks="",
entity_multitasks="",
use_gpu=-1,
vectors=None,
no_tagger=False,
no_parser=False,
no_entities=False,
gold_preproc=False,
version="0.0.0",
meta_path=None,
):
"""
Train a model. Expects data in spaCy's JSON format.
"""
util.fix_random_seed()
util.set_env_log(True)
n_sents = n_sents or None
output_path = util.ensure_path(output_dir)
train_path = util.ensure_path(train_data)
dev_path = util.ensure_path(dev_data)
meta_path = util.ensure_path(meta_path)
if not output_path.exists():
output_path.mkdir()
if not train_path.exists():
prints(train_path, title="Training data not found", exits=1)
if dev_path and not dev_path.exists():
prints(dev_path, title="Development data not found", exits=1)
if meta_path is not None and not meta_path.exists():
prints(meta_path, title="meta.json not found", exits=1)
meta = util.read_json(meta_path) if meta_path else {}
if not isinstance(meta, dict):
prints(
"Expected dict but got: {}".format(type(meta)),
title="Not a valid meta.json format",
exits=1,
)
meta.setdefault("lang", lang)
meta.setdefault("name", "unnamed")
pipeline = ["tagger", "parser", "ner"]
if no_tagger and "tagger" in pipeline:
pipeline.remove("tagger")
if no_parser and "parser" in pipeline:
pipeline.remove("parser")
if no_entities and "ner" in pipeline:
pipeline.remove("ner")
# Take dropout and batch size as generators of values -- dropout
# starts high and decays sharply, to force the optimizer to explore.
# Batch size starts at 1 and grows, so that we make updates quickly
# at the beginning of training.
dropout_rates = util.decaying(
util.env_opt("dropout_from", 0.2),
util.env_opt("dropout_to", 0.2),
util.env_opt("dropout_decay", 0.0),
)
batch_sizes = util.compounding(
util.env_opt("batch_from", 1),
util.env_opt("batch_to", 16),
util.env_opt("batch_compound", 1.001),
)
max_doc_len = util.env_opt("max_doc_len", 5000)
corpus = GoldCorpus(train_path, dev_path, limit=n_sents)
n_train_words = corpus.count_train()
lang_class = util.get_lang_class(lang)
nlp = lang_class()
meta["pipeline"] = pipeline
nlp.meta.update(meta)
if vectors:
util.load_model(vectors, vocab=nlp.vocab)
for lex in nlp.vocab:
values = {}
for attr, func in nlp.vocab.lex_attr_getters.items():
# These attrs are expected to be set by data. Others should
# be set by calling the language functions.
if attr not in (CLUSTER, PROB, IS_OOV, LANG):
values[lex.vocab.strings[attr]] = func(lex.orth_)
lex.set_attrs(**values)
lex.is_oov = False
for name in pipeline:
nlp.add_pipe(nlp.create_pipe(name), name=name)
if parser_multitasks:
for objective in parser_multitasks.split(","):
nlp.parser.add_multitask_objective(objective)
if entity_multitasks:
for objective in entity_multitasks.split(","):
nlp.entity.add_multitask_objective(objective)
optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
nlp._optimizer = None
print("Itn.\tP.Loss\tN.Loss\tUAS\tNER P.\tNER R.\tNER F.\tTag %\tToken %")
try:
train_docs = corpus.train_docs(
nlp,
projectivize=True,
noise_level=0.0,
gold_preproc=gold_preproc,
max_length=0,
)
train_docs = list(train_docs)
for i in range(n_iter):
with tqdm.tqdm(total=n_train_words, leave=False) as pbar:
losses = {}
for batch in minibatch(train_docs, size=batch_sizes):
batch = [(d, g) for (d, g) in batch if len(d) < max_doc_len]
if not batch:
continue
docs, golds = zip(*batch)
nlp.update(
docs,
golds,
sgd=optimizer,
drop=next(dropout_rates),
losses=losses,
)
pbar.update(sum(len(doc) for doc in docs))
with nlp.use_params(optimizer.averages):
util.set_env_log(False)
epoch_model_path = output_path / ("model%d" % i)
nlp.to_disk(epoch_model_path)
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc))
nwords = sum(len(doc_gold[0]) for doc_gold in dev_docs)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
if use_gpu < 0:
gpu_wps = None
cpu_wps = nwords / (end_time - start_time)
else:
gpu_wps = nwords / (end_time - start_time)
with Model.use_device("cpu"):
nlp_loaded = util.load_model_from_path(epoch_model_path)
dev_docs = list(
corpus.dev_docs(nlp_loaded, gold_preproc=gold_preproc)
)
start_time = timer()
scorer = nlp_loaded.evaluate(dev_docs)
end_time = timer()
cpu_wps = nwords / (end_time - start_time)
acc_loc = output_path / ("model%d" % i) / "accuracy.json"
with acc_loc.open("w") as file_:
file_.write(json_dumps(scorer.scores))
meta_loc = output_path / ("model%d" % i) / "meta.json"
meta["accuracy"] = scorer.scores
meta["speed"] = {"nwords": nwords, "cpu": cpu_wps, "gpu": gpu_wps}
meta["vectors"] = {
"width": nlp.vocab.vectors_length,
"vectors": len(nlp.vocab.vectors),
"keys": nlp.vocab.vectors.n_keys,
}
meta["lang"] = nlp.lang
meta["pipeline"] = pipeline
meta["spacy_version"] = ">=%s" % about.__version__
meta.setdefault("name", "model%d" % i)
meta.setdefault("version", version)
with meta_loc.open("w") as file_:
file_.write(json_dumps(meta))
util.set_env_log(True)
print_progress(i, losses, scorer.scores, cpu_wps=cpu_wps, gpu_wps=gpu_wps)
finally:
print("Saving model...")
with nlp.use_params(optimizer.averages):
final_model_path = output_path / "model-final"
nlp.to_disk(final_model_path)
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def __init__(self, vocab=True, make_doc=True, meta={}, **kwargs):
"""Initialise a Language object.
vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
`Language.Defaults.create_vocab`.
make_doc (callable): A function that takes text and returns a `Doc`
object. Usually a `Tokenizer`.
pipeline (list): A list of annotation processes or IDs of annotation,
processes, e.g. a `Tagger` object, or `'tagger'`. IDs are looked
up in `Language.Defaults.factories`.
disable (list): A list of component names to exclude from the pipeline.
The disable list has priority over the pipeline list -- if the same
string occurs in both, the component is not loaded.
meta (dict): Custom meta data for the Language class. Is written to by
models to add model meta data.
RETURNS (Language): The newly constructed object.
"""
self._meta = dict(meta)
self._path = None
if vocab is True:
factory = self.Defaults.create_vocab
vocab = factory(self, **meta.get("vocab", {}))
if vocab.vectors.name is None:
vocab.vectors.name = meta.get("vectors", {}).get("name")
self.vocab = vocab
if make_doc is True:
factory = self.Defaults.create_tokenizer
make_doc = factory(self, **meta.get("tokenizer", {}))
self.tokenizer = make_doc
self.pipeline = []
self._optimizer = None
|
def __init__(self, vocab=True, make_doc=True, meta={}, **kwargs):
"""Initialise a Language object.
vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
`Language.Defaults.create_vocab`.
make_doc (callable): A function that takes text and returns a `Doc`
object. Usually a `Tokenizer`.
pipeline (list): A list of annotation processes or IDs of annotation,
processes, e.g. a `Tagger` object, or `'tagger'`. IDs are looked
up in `Language.Defaults.factories`.
disable (list): A list of component names to exclude from the pipeline.
The disable list has priority over the pipeline list -- if the same
string occurs in both, the component is not loaded.
meta (dict): Custom meta data for the Language class. Is written to by
models to add model meta data.
RETURNS (Language): The newly constructed object.
"""
self._meta = dict(meta)
self._path = None
if vocab is True:
factory = self.Defaults.create_vocab
vocab = factory(self, **meta.get("vocab", {}))
self.vocab = vocab
if make_doc is True:
factory = self.Defaults.create_tokenizer
make_doc = factory(self, **meta.get("tokenizer", {}))
self.tokenizer = make_doc
self.pipeline = []
self._optimizer = None
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def begin_training(self, get_gold_tuples=None, sgd=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
**cfg: Config parameters.
RETURNS: An optimizer
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word]
contexts = []
if cfg.get("device", -1) >= 0:
device = util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
else:
device = None
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
proc.begin_training(
get_gold_tuples(), pipeline=self.pipeline, sgd=self._optimizer, **cfg
)
return self._optimizer
|
def begin_training(self, get_gold_tuples=None, sgd=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
**cfg: Config parameters.
RETURNS: An optimizer
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word]
contexts = []
if cfg.get("device", -1) >= 0:
device = util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
else:
device = None
link_vectors_to_models(self.vocab)
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
proc.begin_training(
get_gold_tuples(), pipeline=self.pipeline, sgd=self._optimizer, **cfg
)
return self._optimizer
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def from_disk(self, path, disable=tuple()):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory. Paths may be either
strings or `Path`-like objects.
disable (list): Names of the pipeline components to disable.
RETURNS (Language): The modified `Language` object.
EXAMPLE:
>>> from spacy.language import Language
>>> nlp = Language().from_disk('/path/to/models')
"""
path = util.ensure_path(path)
deserializers = OrderedDict(
(
("vocab", lambda p: self.vocab.from_disk(p)),
("tokenizer", lambda p: self.tokenizer.from_disk(p, vocab=False)),
("meta.json", lambda p: self.meta.update(util.read_json(p))),
)
)
_fix_pretrained_vectors_name(self)
for name, proc in self.pipeline:
if name in disable:
continue
if not hasattr(proc, "to_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
exclude = {p: False for p in disable}
if not (path / "vocab").exists():
exclude["vocab"] = True
util.from_disk(path, deserializers, exclude)
self._path = path
return self
|
def from_disk(self, path, disable=tuple()):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory. Paths may be either
strings or `Path`-like objects.
disable (list): Names of the pipeline components to disable.
RETURNS (Language): The modified `Language` object.
EXAMPLE:
>>> from spacy.language import Language
>>> nlp = Language().from_disk('/path/to/models')
"""
path = util.ensure_path(path)
deserializers = OrderedDict(
(
("vocab", lambda p: self.vocab.from_disk(p)),
("tokenizer", lambda p: self.tokenizer.from_disk(p, vocab=False)),
("meta.json", lambda p: self.meta.update(util.read_json(p))),
)
)
for name, proc in self.pipeline:
if name in disable:
continue
if not hasattr(proc, "to_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
exclude = {p: False for p in disable}
if not (path / "vocab").exists():
exclude["vocab"] = True
util.from_disk(path, deserializers, exclude)
self._path = path
return self
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def from_bytes(self, bytes_data, disable=[]):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
disable (list): Names of the pipeline components to disable.
RETURNS (Language): The `Language` object.
"""
deserializers = OrderedDict(
(
("vocab", lambda b: self.vocab.from_bytes(b)),
("tokenizer", lambda b: self.tokenizer.from_bytes(b, vocab=False)),
("meta", lambda b: self.meta.update(ujson.loads(b))),
)
)
_fix_pretrained_vectors_name(self)
for i, (name, proc) in enumerate(self.pipeline):
if name in disable:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[i] = lambda b, proc=proc: proc.from_bytes(b, vocab=False)
msg = util.from_bytes(bytes_data, deserializers, {})
return self
|
def from_bytes(self, bytes_data, disable=[]):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
disable (list): Names of the pipeline components to disable.
RETURNS (Language): The `Language` object.
"""
deserializers = OrderedDict(
(
("vocab", lambda b: self.vocab.from_bytes(b)),
("tokenizer", lambda b: self.tokenizer.from_bytes(b, vocab=False)),
("meta", lambda b: self.meta.update(ujson.loads(b))),
)
)
for i, (name, proc) in enumerate(self.pipeline):
if name in disable:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[i] = lambda b, proc=proc: proc.from_bytes(b, vocab=False)
msg = util.from_bytes(bytes_data, deserializers, {})
return self
|
https://github.com/explosion/spaCy/issues/1660
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Projects/foobar/.env/lib/python3.6/site-packages/spacy/language.py", line 333, in __call__
doc = proc(doc)
File "pipeline.pyx", line 390, in spacy.pipeline.Tagger.__call__
File "pipeline.pyx", line 402, in spacy.pipeline.Tagger.predict
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 293, in predict
X = layer(layer.ops.flatten(seqs_in, pad=pad))
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 55, in predict
X = layer(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 161, in __call__
return self.predict(x)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/model.py", line 125, in predict
y, _ = self.begin_update(X)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 372, in uniqued_fwd
Y_uniq, bp_Y_uniq = layer.begin_update(X[ind], drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 61, in begin_update
X, inc_layer_grad = layer.begin_update(X, drop=drop)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in begin_update
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 176, in <listcomp>
values = [fwd(X, *a, **k) for fwd in forward]
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/api.py", line 258, in wrap
output = func(*args, **kwargs)
File "/Projects/foobar/.env/lib/python3.6/site-packages/thinc/neural/_classes/static_vectors.py", line 67, in begin_update
dotted = self.ops.batch_dot(vectors, self.W)
File "ops.pyx", line 299, in thinc.neural.ops.NumpyOps.batch_dot
ValueError: shapes (4,0) and (300,128) not aligned: 0 (dim 1) != 300 (dim 0)
|
ValueError
|
def init_model(
lang, output_dir, freqs_loc, clusters_loc=None, vectors_loc=None, prune_vectors=-1
):
"""
Create a new model from raw data, like word frequencies, Brown clusters
and word vectors.
"""
if not freqs_loc.exists():
prints(freqs_loc, title="Can't find words frequencies file", exits=1)
clusters_loc = ensure_path(clusters_loc)
vectors_loc = ensure_path(vectors_loc)
probs, oov_prob = read_freqs(freqs_loc)
vectors_data, vector_keys = (
read_vectors(vectors_loc) if vectors_loc else (None, None)
)
clusters = read_clusters(clusters_loc) if clusters_loc else {}
nlp = create_model(
lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors
)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
return nlp
|
def init_model(
lang, output_dir, freqs_loc, clusters_loc=None, vectors_loc=None, prune_vectors=-1
):
"""
Create a new model from raw data, like word frequencies, Brown clusters
and word vectors.
"""
if not freqs_loc.exists():
prints(freqs_loc, title="Can't find words frequencies file", exits=1)
clusters_loc = ensure_path(clusters_loc)
vectors_loc = ensure_path(vectors_loc)
probs, oov_prob = read_freqs(freqs_loc)
vectors_data, vector_keys = read_vectors(vectors_loc) if vectors_loc else None, None
clusters = read_clusters(clusters_loc) if clusters_loc else {}
nlp = create_model(
lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors
)
if not output_dir.exists():
output_dir.mkdir()
nlp.to_disk(output_dir)
return nlp
|
https://github.com/explosion/spaCy/issues/1928
|
Counting frequencies...
923130it [00:02, 309685.80it/s]
Reading vectors...
108821it [00:11, 9870.09it/s]
Reading clusters...
1047705it [00:47, 22187.37it/s]
Creating model...
100%|██████████████████████████████| 36888/36888 [00:01<00:00, 21654.38it/s]
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/user/Documents/spacy-dev-resources/.venv/lib/python2.7/site-packages/spacy/__main__.py", line 31, in <module>
plac.call(commands[command], sys.argv[1:])
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/spacy/cli/init_model.py", line 42, in init_model
nlp = create_model(lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/spacy/cli/init_model.py", line 73, in create_model
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
File "vectors.pyx", line 56, in spacy.vectors.Vectors.__init__
AttributeError: 'tuple' object has no attribute 'shape'
|
AttributeError
|
def create_model(
lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors
):
print("Creating model...")
lang_class = get_lang_class(lang)
nlp = lang_class()
for lexeme in nlp.vocab:
lexeme.rank = 0
lex_added = 0
for i, (word, prob) in enumerate(
tqdm(sorted(probs.items(), key=lambda item: item[1], reverse=True))
):
lexeme = nlp.vocab[word]
lexeme.rank = i
lexeme.prob = prob
lexeme.is_oov = False
# Decode as a little-endian string, so that we can do & 15 to get
# the first 4 bits. See _parse_features.pyx
if word in clusters:
lexeme.cluster = int(clusters[word][::-1], 2)
else:
lexeme.cluster = 0
lex_added += 1
nlp.vocab.cfg.update({"oov_prob": oov_prob})
if len(vectors_data):
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
if prune_vectors >= 1:
nlp.vocab.prune_vectors(prune_vectors)
vec_added = len(nlp.vocab.vectors)
prints(
"{} entries, {} vectors".format(lex_added, vec_added),
title="Sucessfully compiled vocab",
)
return nlp
|
def create_model(
lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors
):
print("Creating model...")
lang_class = get_lang_class(lang)
nlp = lang_class()
for lexeme in nlp.vocab:
lexeme.rank = 0
lex_added = 0
for i, (word, prob) in enumerate(
tqdm(sorted(probs.items(), key=lambda item: item[1], reverse=True))
):
lexeme = nlp.vocab[word]
lexeme.rank = i
lexeme.prob = prob
lexeme.is_oov = False
# Decode as a little-endian string, so that we can do & 15 to get
# the first 4 bits. See _parse_features.pyx
if word in clusters:
lexeme.cluster = int(clusters[word][::-1], 2)
else:
lexeme.cluster = 0
lex_added += 1
nlp.vocab.cfg.update({"oov_prob": oov_prob})
if vectors_data:
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
if prune_vectors >= 1:
nlp.vocab.prune_vectors(prune_vectors)
vec_added = len(nlp.vocab.vectors)
prints(
"{} entries, {} vectors".format(lex_added, vec_added),
title="Sucessfully compiled vocab",
)
return nlp
|
https://github.com/explosion/spaCy/issues/1928
|
Counting frequencies...
923130it [00:02, 309685.80it/s]
Reading vectors...
108821it [00:11, 9870.09it/s]
Reading clusters...
1047705it [00:47, 22187.37it/s]
Creating model...
100%|██████████████████████████████| 36888/36888 [00:01<00:00, 21654.38it/s]
Traceback (most recent call last):
File "/usr/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/user/Documents/spacy-dev-resources/.venv/lib/python2.7/site-packages/spacy/__main__.py", line 31, in <module>
plac.call(commands[command], sys.argv[1:])
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/plac_core.py", line 328, in call
cmd, result = parser.consume(arglist)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/plac_core.py", line 207, in consume
return cmd, self.func(*(args + varargs + extraopts), **kwargs)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/spacy/cli/init_model.py", line 42, in init_model
nlp = create_model(lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors)
File "/home/user/Documents/spacy-dev-resources/.venv/local/lib/python2.7/site-packages/spacy/cli/init_model.py", line 73, in create_model
nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)
File "vectors.pyx", line 56, in spacy.vectors.Vectors.__init__
AttributeError: 'tuple' object has no attribute 'shape'
|
AttributeError
|
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
init_file = get_data_path() / name / "__init__.py"
spec = importlib.util.spec_from_file_location(name, str(init_file))
try:
cls = importlib.util.module_from_spec(spec)
except AttributeError:
raise IOError(
"Cant' load '%s'. If you're using a shortcut link, make sure it "
"points to a valid model package (not just a data directory)." % name
)
spec.loader.exec_module(cls)
return cls.load(**overrides)
|
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
init_file = get_data_path() / name / "__init__.py"
spec = importlib.util.spec_from_file_location(name, init_file)
try:
cls = importlib.util.module_from_spec(spec)
except AttributeError:
raise IOError(
"Cant' load '%s'. If you're using a shortcut link, make sure it "
"points to a valid model package (not just a data directory)." % name
)
spec.loader.exec_module(cls)
return cls.load(**overrides)
|
https://github.com/explosion/spaCy/issues/1271
|
In [2]: en = spacy.load('en')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-a732401aeb32> in <module>()
----> 1 en = spacy.load('en')
/src/spaCy/spacy/__init__.py in load(name, **overrides)
11 def load(name, **overrides):
12 name = resolve_load_name(name, **overrides)
---> 13 return util.load_model(name, **overrides)
14
15
/src/spaCy/spacy/util.py in load_model(name, **overrides)
101 if isinstance(name, basestring_):
102 if name in set([d.name for d in data_path.iterdir()]): # in data dir / shortcut
--> 103 return load_model_from_link(name, **overrides)
104 if is_package(name): # installed as package
105 return load_model_from_package(name, **overrides)
/src/spaCy/spacy/util.py in load_model_from_link(name, **overrides)
114 """Load a model from a shortcut link, or directory in spaCy data path."""
115 init_file = get_data_path() / name / '__init__.py'
--> 116 spec = importlib.util.spec_from_file_location(name, init_file)
117 try:
118 cls = importlib.util.module_from_spec(spec)
/usr/lib/python3.5/importlib/_bootstrap_external.py in spec_from_file_location(name, location, loader, submodule_search_locations)
AttributeError: 'PosixPath' object has no attribute 'endswith'
|
AttributeError
|
def get_json(url, desc):
r = requests.get(url)
if r.status_code != 200:
prints(
"Couldn't fetch %s. Please find a model for your spaCy installation "
"(v%s), and download it manually." % (desc, about.__version__),
about.__docs_models__,
title="Server error (%d)" % r.status_code,
exits=True,
)
return r.json()
|
def get_json(url, desc):
r = requests.get(url)
if r.status_code != 200:
prints(
"Couldn't fetch %s. Please find a model for your spaCy installation "
"(v%s), and download it manually." % (desc, about.__version__),
about.__docs__,
title="Server error (%d)" % r.status_code,
exits=True,
)
return r.json()
|
https://github.com/explosion/spaCy/issues/1051
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "spacy/tokens/doc.pyx", line 434, in __get__ (spacy/tokens/doc.cpp:9664)
ValueError: sentence boundary detection requires the dependency parse, which requires data to be installed. If you haven't done so, run:
python -m spacy download es
to install the data
|
ValueError
|
def depr_model_download(lang):
"""
Replace download modules within en and de with deprecation warning and
download default language model (using shortcut).
"""
prints(
"The spacy.%s.download command is now deprecated. Please use "
"python -m spacy download [model name or shortcut] instead. For "
"more info, see the documentation:" % lang,
about.__docs_models__,
"Downloading default '%s' model now..." % lang,
title="Warning: deprecated command",
)
download(lang)
|
def depr_model_download(lang):
"""
Replace download modules within en and de with deprecation warning and
download default language model (using shortcut).
"""
prints(
"The spacy.%s.download command is now deprecated. Please use "
"python -m spacy download [model name or shortcut] instead. For "
"more info, see the docs: %s." % (lang, about.__docs__),
"Downloading default '%s' model now..." % lang,
title="Warning: deprecated command",
)
download(lang)
|
https://github.com/explosion/spaCy/issues/1051
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "spacy/tokens/doc.pyx", line 434, in __get__ (spacy/tokens/doc.cpp:9664)
ValueError: sentence boundary detection requires the dependency parse, which requires data to be installed. If you haven't done so, run:
python -m spacy download es
to install the data
|
ValueError
|
def __init__(self, ax, raster_source, **kwargs):
self.raster_source = raster_source
if matplotlib.__version__ >= "3":
# This artist fills the Axes, so should not influence layout.
kwargs.setdefault("in_layout", False)
super(SlippyImageArtist, self).__init__(ax, **kwargs)
self.cache = []
ax.figure.canvas.mpl_connect("button_press_event", self.on_press)
ax.figure.canvas.mpl_connect("button_release_event", self.on_release)
self.on_release()
|
def __init__(self, ax, raster_source, **kwargs):
self.raster_source = raster_source
super(SlippyImageArtist, self).__init__(ax, **kwargs)
self.cache = []
ax.figure.canvas.mpl_connect("button_press_event", self.on_press)
ax.figure.canvas.mpl_connect("button_release_event", self.on_release)
self.on_release()
|
https://github.com/SciTools/cartopy/issues/1451
|
Traceback (most recent call last):
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3319, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-8-7607f9c8266c>", line 59, in <module>
plt.show()
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/pyplot.py", line 269, in show
return _show(*args, **kw)
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pycharm_matplotlib_backend/backend_interagg.py", line 27, in __call__
manager.show(**kwargs)
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pycharm_matplotlib_backend/backend_interagg.py", line 99, in show
self.canvas.show()
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pycharm_matplotlib_backend/backend_interagg.py", line 64, in show
self.figure.tight_layout()
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/figure.py", line 2476, in tight_layout
pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/tight_layout.py", line 362, in get_tight_layout_figure
pad=pad, h_pad=h_pad, w_pad=w_pad)
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/tight_layout.py", line 111, in auto_adjust_subplotpars
tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/tight_layout.py", line 112, in <listcomp>
if ax.get_visible()])
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/axes/_base.py", line 4393, in get_tightbbox
bbox = a.get_tightbbox(renderer)
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/artist.py", line 284, in get_tightbbox
bbox = self.get_window_extent(renderer)
File "/home/dwells/anaconda3/envs/kando/lib/python3.7/site-packages/matplotlib/image.py", line 868, in get_window_extent
x0, x1, y0, y1 = self._extent
TypeError: cannot unpack non-iterable NoneType object
|
TypeError
|
def _repr_html_(self):
if not six.PY2:
from html import escape
else:
from cgi import escape
try:
# As matplotlib is not a core cartopy dependency, don't error
# if it's not available.
import matplotlib.pyplot as plt
except ImportError:
# We can't return an SVG of the CRS, so let Jupyter fall back to
# a default repr by returning None.
return None
# Produce a visual repr of the Projection instance.
fig, ax = plt.subplots(figsize=(5, 3), subplot_kw={"projection": self})
ax.set_global()
ax.coastlines("auto")
ax.gridlines()
buf = six.StringIO()
fig.savefig(buf, format="svg", bbox_inches="tight")
plt.close(fig)
# "Rewind" the buffer to the start and return it as an svg string.
buf.seek(0)
svg = buf.read()
return "{}<pre>{}</pre>".format(svg, escape(repr(self)))
|
def _repr_html_(self):
import cgi
try:
# As matplotlib is not a core cartopy dependency, don't error
# if it's not available.
import matplotlib.pyplot as plt
except ImportError:
# We can't return an SVG of the CRS, so let Jupyter fall back to
# a default repr by returning None.
return None
# Produce a visual repr of the Projection instance.
fig, ax = plt.subplots(figsize=(5, 3), subplot_kw={"projection": self})
ax.set_global()
ax.coastlines("auto")
ax.gridlines()
buf = six.StringIO()
fig.savefig(buf, format="svg", bbox_inches="tight")
plt.close(fig)
# "Rewind" the buffer to the start and return it as an svg string.
buf.seek(0)
svg = buf.read()
return "{}<pre>{}</pre>".format(svg, cgi.escape(repr(self)))
|
https://github.com/SciTools/cartopy/issues/1395
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/miniconda3/envs/py38/lib/python3.8/site-packages/IPython/core/formatters.py in __call__(self, obj)
343 method = get_real_method(obj, self.print_method)
344 if method is not None:
--> 345 return method()
346 return None
347 else:
~/miniconda3/envs/py38/lib/python3.8/site-packages/cartopy/crs.py in _repr_html_(self)
180 buf.seek(0)
181 svg = buf.read()
--> 182 return '{}<pre>{}</pre>'.format(svg, cgi.escape(repr(self)))
183
184 def _as_mpl_axes(self):
AttributeError: module 'cgi' has no attribute 'escape'
|
AttributeError
|
def __init__(self, desired_tile_form="RGB", user_agent="cartopybot/1.0"):
self.imgs = []
self.crs = ccrs.Mercator.GOOGLE
self.desired_tile_form = desired_tile_form
self.user_agent = user_agent
|
def __init__(self, desired_tile_form="RGB"):
self.imgs = []
self.crs = ccrs.Mercator.GOOGLE
self.desired_tile_form = desired_tile_form
|
https://github.com/SciTools/cartopy/issues/1341
|
ValueError Traceback (most recent call last)
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2054 orientation=orientation,
2055 dryrun=True,
-> 2056 **kwargs)
2057 renderer = self.figure._cachedRenderer
2058 bbox_artists = kwargs.pop("bbox_extra_artists", None)
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
--> 388 self.figure.draw(self.renderer)
389 # A GUI class may be need to update a window using this draw, so
390 # don't forget to call the superclass.
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
36 renderer.start_filter()
37
---> 38 return draw(artist, renderer, *args, **kwargs)
39 finally:
40 if artist.get_agg_filter() is not None:
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/figure.py in draw(self, renderer)
1707 self.patch.draw(renderer)
1708 mimage._draw_list_compositing_images(
-> 1709 renderer, self, artists, self.suppressComposite)
1710
1711 renderer.close_group('figure')
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, artists, suppress_composite)
133 if not_composite or not has_images:
134 for a in artists:
--> 135 a.draw(renderer)
136 else:
137 # Composite any adjacent images together
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
36 renderer.start_filter()
37
---> 38 return draw(artist, renderer, *args, **kwargs)
39 finally:
40 if artist.get_agg_filter() is not None:
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/mpl/geoaxes.py in draw(self, renderer, inframe)
380 for factory, args, kwargs in self.img_factories:
381 img, extent, origin = factory.image_for_domain(
--> 382 self._get_extent_geom(factory.crs), args[0])
383 self.imshow(img, extent=extent, origin=origin,
384 transform=factory.crs, *args[1:], **kwargs)
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/io/img_tiles.py in image_for_domain(self, target_domain, target_z)
66 tiles.append([img, x, y, origin])
67
---> 68 img, extent, origin = _merge_tiles(tiles)
69 return img, extent, origin
70
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/io/img_tiles.py in _merge_tiles(tiles)
503 """Return a single image, merging the given images."""
504 if not tiles:
--> 505 raise ValueError('A non-empty list of tiles should '
506 'be provided to merge.')
507 xset = [set(x) for i, x, y, _ in tiles]
ValueError: A non-empty list of tiles should be provided to merge.
|
ValueError
|
def get_image(self, tile):
if six.PY3:
from urllib.request import urlopen, Request, HTTPError, URLError
else:
from urllib2 import urlopen, Request, HTTPError, URLError
url = self._image_url(tile)
try:
request = Request(url, headers={"user-agent": self.user_agent})
fh = urlopen(request)
im_data = six.BytesIO(fh.read())
fh.close()
img = Image.open(im_data)
except (HTTPError, URLError) as err:
print(err)
img = Image.fromarray(np.full((256, 256, 3), (250, 250, 250), dtype=np.uint8))
img = img.convert(self.desired_tile_form)
return img, self.tileextent(tile), "lower"
|
def get_image(self, tile):
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
url = self._image_url(tile)
fh = urlopen(url)
im_data = six.BytesIO(fh.read())
fh.close()
img = Image.open(im_data)
img = img.convert(self.desired_tile_form)
return img, self.tileextent(tile), "lower"
|
https://github.com/SciTools/cartopy/issues/1341
|
ValueError Traceback (most recent call last)
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/miniconda3/envs/education/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2054 orientation=orientation,
2055 dryrun=True,
-> 2056 **kwargs)
2057 renderer = self.figure._cachedRenderer
2058 bbox_artists = kwargs.pop("bbox_extra_artists", None)
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
--> 388 self.figure.draw(self.renderer)
389 # A GUI class may be need to update a window using this draw, so
390 # don't forget to call the superclass.
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
36 renderer.start_filter()
37
---> 38 return draw(artist, renderer, *args, **kwargs)
39 finally:
40 if artist.get_agg_filter() is not None:
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/figure.py in draw(self, renderer)
1707 self.patch.draw(renderer)
1708 mimage._draw_list_compositing_images(
-> 1709 renderer, self, artists, self.suppressComposite)
1710
1711 renderer.close_group('figure')
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/image.py in _draw_list_compositing_images(renderer, parent, artists, suppress_composite)
133 if not_composite or not has_images:
134 for a in artists:
--> 135 a.draw(renderer)
136 else:
137 # Composite any adjacent images together
~/miniconda3/envs/education/lib/python3.7/site-packages/matplotlib/artist.py in draw_wrapper(artist, renderer, *args, **kwargs)
36 renderer.start_filter()
37
---> 38 return draw(artist, renderer, *args, **kwargs)
39 finally:
40 if artist.get_agg_filter() is not None:
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/mpl/geoaxes.py in draw(self, renderer, inframe)
380 for factory, args, kwargs in self.img_factories:
381 img, extent, origin = factory.image_for_domain(
--> 382 self._get_extent_geom(factory.crs), args[0])
383 self.imshow(img, extent=extent, origin=origin,
384 transform=factory.crs, *args[1:], **kwargs)
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/io/img_tiles.py in image_for_domain(self, target_domain, target_z)
66 tiles.append([img, x, y, origin])
67
---> 68 img, extent, origin = _merge_tiles(tiles)
69 return img, extent, origin
70
~/miniconda3/envs/education/lib/python3.7/site-packages/cartopy/io/img_tiles.py in _merge_tiles(tiles)
503 """Return a single image, merging the given images."""
504 if not tiles:
--> 505 raise ValueError('A non-empty list of tiles should '
506 'be provided to merge.')
507 xset = [set(x) for i, x, y, _ in tiles]
ValueError: A non-empty list of tiles should be provided to merge.
|
ValueError
|
def barbs(self, x, y, u, v, *args, **kwargs):
"""
Plot a field of barbs.
Extra Kwargs:
* transform: :class:`cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the vectors are defined.
* regrid_shape: int or 2-tuple of ints
If given, specifies that the points where the arrows are
located will be interpolated onto a regular grid in
projection space. If a single integer is given then that
will be used as the minimum grid length dimension, while the
other dimension will be scaled up according to the target
extent's aspect ratio. If a pair of ints are given they
determine the grid length in the x and y directions
respectively.
* target_extent: 4-tuple
If given, specifies the extent in the target CRS that the
regular grid defined by *regrid_shape* will have. Defaults
to the current extent of the map projection.
See :func:`matplotlib.pyplot.barbs` for details on arguments
and keyword arguments.
.. note::
The vector components must be defined as grid eastward and
grid northward.
"""
t = kwargs.get("transform", None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError(
"invalid transform:"
" Spherical barbs are not supported - "
" consider using PlateCarree/RotatedPole."
)
if isinstance(t, ccrs.Projection):
kwargs["transform"] = t._as_mpl_transform(self)
else:
kwargs["transform"] = t
regrid_shape = kwargs.pop("regrid_shape", None)
target_extent = kwargs.pop("target_extent", self.get_extent(self.projection))
if regrid_shape is not None:
# If regridding is required then we'll be handling transforms
# manually and plotting in native coordinates.
regrid_shape = self._regrid_shape_aspect(regrid_shape, target_extent)
if args:
# Interpolate color array as well as vector components.
x, y, u, v, c = vector_scalar_to_grid(
t,
self.projection,
regrid_shape,
x,
y,
u,
v,
args[0],
target_extent=target_extent,
)
args = (c,) + args[1:]
else:
x, y, u, v = vector_scalar_to_grid(
t,
self.projection,
regrid_shape,
x,
y,
u,
v,
target_extent=target_extent,
)
kwargs.pop("transform", None)
elif t != self.projection:
# Transform the vectors if the projection is not the same as the
# data transform.
if (x.ndim == 1 and y.ndim == 1) and (x.shape != u.shape):
x, y = np.meshgrid(x, y)
u, v = self.projection.transform_vectors(t, x, y, u, v)
return matplotlib.axes.Axes.barbs(self, x, y, u, v, *args, **kwargs)
|
def barbs(self, x, y, u, v, *args, **kwargs):
"""
Plot a 2-D field of barbs.
Extra Kwargs:
* transform: :class:`cartopy.crs.Projection` or matplotlib transform
The coordinate system in which the vectors are defined.
* regrid_shape: int or 2-tuple of ints
If given, specifies that the points where the arrows are
located will be interpolated onto a regular grid in
projection space. If a single integer is given then that
will be used as the minimum grid length dimension, while the
other dimension will be scaled up according to the target
extent's aspect ratio. If a pair of ints are given they
determine the grid length in the x and y directions
respectively.
* target_extent: 4-tuple
If given, specifies the extent in the target CRS that the
regular grid defined by *regrid_shape* will have. Defaults
to the current extent of the map projection.
See :func:`matplotlib.pyplot.barbs` for details on arguments
and keyword arguments.
.. note::
The vector components must be defined as grid eastward and
grid northward.
"""
t = kwargs.get("transform", None)
if t is None:
t = self.projection
if isinstance(t, ccrs.CRS) and not isinstance(t, ccrs.Projection):
raise ValueError(
"invalid transform:"
" Spherical barbs are not supported - "
" consider using PlateCarree/RotatedPole."
)
if isinstance(t, ccrs.Projection):
kwargs["transform"] = t._as_mpl_transform(self)
else:
kwargs["transform"] = t
regrid_shape = kwargs.pop("regrid_shape", None)
target_extent = kwargs.pop("target_extent", self.get_extent(self.projection))
if regrid_shape is not None:
# If regridding is required then we'll be handling transforms
# manually and plotting in native coordinates.
regrid_shape = self._regrid_shape_aspect(regrid_shape, target_extent)
if args:
# Interpolate color array as well as vector components.
x, y, u, v, c = vector_scalar_to_grid(
t,
self.projection,
regrid_shape,
x,
y,
u,
v,
args[0],
target_extent=target_extent,
)
args = (c,) + args[1:]
else:
x, y, u, v = vector_scalar_to_grid(
t,
self.projection,
regrid_shape,
x,
y,
u,
v,
target_extent=target_extent,
)
kwargs.pop("transform", None)
elif t != self.projection:
# Transform the vectors if the projection is not the same as the
# data transform.
if x.ndim == 1 and y.ndim == 1:
x, y = np.meshgrid(x, y)
u, v = self.projection.transform_vectors(t, x, y, u, v)
return matplotlib.axes.Axes.barbs(self, x, y, u, v, *args, **kwargs)
|
https://github.com/SciTools/cartopy/issues/806
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-57-ca96cf8f7709> in <module>()
16 ax.barbs(np.array(recon_data['lon']), np.array(recon_data['lat']),
17 np.array(recon_data['u']), np.array(recon_data['v']),
---> 18 np.array(recon_data['peak']), transform=ccrs.PlateCarree())
19
20 # Add text (aligned to the right); save the returned object so we can manipulate it.
/Users/rmay/miniconda3/envs/py35/lib/python3.5/site-packages/cartopy/mpl/geoaxes.py in barbs(self, x, y, u, v, *args, **kwargs)
1580 if x.ndim == 1 and y.ndim == 1:
1581 x, y = np.meshgrid(x, y)
-> 1582 u, v = self.projection.transform_vectors(t, x, y, u, v)
1583 return matplotlib.axes.Axes.barbs(self, x, y, u, v, *args, **kwargs)
1584
lib/cartopy/_crs.pyx in cartopy._crs.CRS.transform_vectors (lib/cartopy/_crs.c:6489)()
ValueError: x, y, u and v arrays must be the same shape
|
ValueError
|
def _axes_domain(self, nx=None, ny=None, background_patch=None):
"""Returns x_range, y_range"""
DEBUG = False
transform = self._crs_transform()
ax_transform = self.axes.transAxes
desired_trans = ax_transform - transform
nx = nx or 30
ny = ny or 30
x = np.linspace(1e-9, 1 - 1e-9, nx)
y = np.linspace(1e-9, 1 - 1e-9, ny)
x, y = np.meshgrid(x, y)
coords = np.concatenate([x.flatten()[:, None], y.flatten()[:, None]], 1)
in_data = desired_trans.transform(coords)
ax_to_bkg_patch = self.axes.transAxes - background_patch.get_transform()
ok = np.zeros(in_data.shape[:-1], dtype=np.bool)
# XXX Vectorise contains_point
for i, val in enumerate(in_data):
# convert the coordinates of the data to the background
# patches coordinates
background_coord = ax_to_bkg_patch.transform(coords[i : i + 1, :])
bkg_patch_contains = background_patch.get_path().contains_point
if bkg_patch_contains(background_coord[0, :]):
color = "r"
ok[i] = True
else:
color = "b"
if DEBUG:
import matplotlib.pyplot as plt
plt.plot(
coords[i, 0],
coords[i, 1],
"o" + color,
clip_on=False,
transform=ax_transform,
)
# plt.text(coords[i, 0], coords[i, 1], str(val), clip_on=False,
# transform=ax_transform, rotation=23,
# horizontalalignment='right')
inside = in_data[ok, :]
# If there were no data points in the axes we just use the x and y
# range of the projection.
if inside.size == 0:
x_range = self.crs.x_limits
y_range = self.crs.y_limits
else:
x_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0])
y_range = np.nanmin(inside[:, 1]), np.nanmax(inside[:, 1])
# XXX Cartopy specific thing. Perhaps make this bit a specialisation
# in a subclass...
crs = self.crs
if isinstance(crs, Projection):
x_range = np.clip(x_range, *crs.x_limits)
y_range = np.clip(y_range, *crs.y_limits)
# if the limit is >90% of the full x limit, then just use the full
# x limit (this makes circular handling better)
prct = np.abs(np.diff(x_range) / np.diff(crs.x_limits))
if prct > 0.9:
x_range = crs.x_limits
return x_range, y_range
|
def _axes_domain(self, nx=None, ny=None, background_patch=None):
"""Returns x_range, y_range"""
DEBUG = False
transform = self._crs_transform()
ax_transform = self.axes.transAxes
desired_trans = ax_transform - transform
nx = nx or 30
ny = ny or 30
x = np.linspace(1e-9, 1 - 1e-9, nx)
y = np.linspace(1e-9, 1 - 1e-9, ny)
x, y = np.meshgrid(x, y)
coords = np.concatenate([x.flatten()[:, None], y.flatten()[:, None]], 1)
in_data = desired_trans.transform(coords)
ax_to_bkg_patch = self.axes.transAxes - background_patch.get_transform()
ok = np.zeros(in_data.shape[:-1], dtype=np.bool)
# XXX Vectorise contains_point
for i, val in enumerate(in_data):
# convert the coordinates of the data to the background
# patches coordinates
background_coord = ax_to_bkg_patch.transform(coords[i : i + 1, :])
bkg_patch_contains = background_patch.get_path().contains_point
if bkg_patch_contains(background_coord[0, :]):
color = "r"
ok[i] = True
else:
color = "b"
if DEBUG:
import matplotlib.pyplot as plt
plt.plot(
coords[i, 0],
coords[i, 1],
"o" + color,
clip_on=False,
transform=ax_transform,
)
# plt.text(coords[i, 0], coords[i, 1], str(val), clip_on=False,
# transform=ax_transform, rotation=23,
# horizontalalignment='right')
inside = in_data[ok, :]
x_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0])
y_range = np.nanmin(inside[:, 1]), np.nanmax(inside[:, 1])
# XXX Cartopy specific thing. Perhaps make this bit a specialisation
# in a subclass...
crs = self.crs
if isinstance(crs, Projection):
x_range = np.clip(x_range, *crs.x_limits)
y_range = np.clip(y_range, *crs.y_limits)
# if the limit is >90 of the full x limit, then just use the full
# x limit (this makes circular handling better)
prct = np.abs(np.diff(x_range) / np.diff(crs.x_limits))
if prct > 0.9:
x_range = crs.x_limits
return x_range, y_range
|
https://github.com/SciTools/cartopy/issues/322
|
ValueError: zero-size array to fmin.reduce without identity
Exception in Tkinter callback
Traceback (most recent call last):
File "/usr/local/sci/lib/python2.7/lib-tk/Tkinter.py", line 1410, in __call__
return self.func(*args)
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/backends/backend_tkagg.py", line 276, in resize
self.show()
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/backends/backend_tkagg.py", line 348, in draw
FigureCanvasAgg.draw(self)
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 439, in draw
self.figure.draw(self.renderer)
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/artist.py", line 54, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/figure.py", line 999, in draw
func(*args)
File "/usr/local/sci/lib/python2.7/site-packages/matplotlib/artist.py", line 54, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/net/home/h04/mwalker/Git/cartopy/lib/cartopy/mpl/geoaxes.py", line 279, in draw
gl._draw_gridliner(background_patch=self.background_patch)
File "/net/home/h04/mwalker/Git/cartopy/lib/cartopy/mpl/gridliner.py", line 280, in _draw_gridliner
background_patch=background_patch)
File "/net/home/h04/mwalker/Git/cartopy/lib/cartopy/mpl/gridliner.py", line 428, in _axes_domain
x_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0])
File "/usr/local/sci/lib/python2.7/site-packages/numpy/lib/function_base.py", line 1507, in nanmin
return np.fmin.reduce(a.flat)
SystemError: error return without exception set
|
SystemError
|
def __init__(self, *args, **kwargs):
"""
Parameters
----------
token : str, required. Discord token
[default: ${TQDM_DISCORD_TOKEN}].
channel_id : int, required. Discord channel ID
[default: ${TQDM_DISCORD_CHANNEL_ID}].
mininterval : float, optional.
Minimum of [default: 1.5] to avoid rate limit.
See `tqdm.auto.tqdm.__init__` for other parameters.
"""
if not kwargs.get("disable"):
kwargs = kwargs.copy()
logging.getLogger("HTTPClient").setLevel(logging.WARNING)
self.dio = DiscordIO(
kwargs.pop("token", getenv("TQDM_DISCORD_TOKEN")),
kwargs.pop("channel_id", getenv("TQDM_DISCORD_CHANNEL_ID")),
)
kwargs["mininterval"] = max(1.5, kwargs.get("mininterval", 1.5))
super(tqdm_discord, self).__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
"""
Parameters
----------
token : str, required. Discord token
[default: ${TQDM_DISCORD_TOKEN}].
channel_id : int, required. Discord channel ID
[default: ${TQDM_DISCORD_CHANNEL_ID}].
mininterval : float, optional.
Minimum of [default: 1.5] to avoid rate limit.
See `tqdm.auto.tqdm.__init__` for other parameters.
"""
kwargs = kwargs.copy()
logging.getLogger("HTTPClient").setLevel(logging.WARNING)
self.dio = DiscordIO(
kwargs.pop("token", getenv("TQDM_DISCORD_TOKEN")),
kwargs.pop("channel_id", getenv("TQDM_DISCORD_CHANNEL_ID")),
)
kwargs["mininterval"] = max(1.5, kwargs.get("mininterval", 1.5))
super(tqdm_discord, self).__init__(*args, **kwargs)
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def clear(self, *args, **kwargs):
super(tqdm_discord, self).clear(*args, **kwargs)
if not self.disable:
self.dio.write("")
|
def clear(self, *args, **kwargs):
super(tqdm_discord, self).clear(*args, **kwargs)
self.dio.write("")
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def __init__(self, *args, **kwargs):
"""
Parameters
----------
token : str, required. Telegram token
[default: ${TQDM_TELEGRAM_TOKEN}].
chat_id : str, required. Telegram chat ID
[default: ${TQDM_TELEGRAM_CHAT_ID}].
See `tqdm.auto.tqdm.__init__` for other parameters.
"""
if not kwargs.get("disable"):
kwargs = kwargs.copy()
self.tgio = TelegramIO(
kwargs.pop("token", getenv("TQDM_TELEGRAM_TOKEN")),
kwargs.pop("chat_id", getenv("TQDM_TELEGRAM_CHAT_ID")),
)
super(tqdm_telegram, self).__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
"""
Parameters
----------
token : str, required. Telegram token
[default: ${TQDM_TELEGRAM_TOKEN}].
chat_id : str, required. Telegram chat ID
[default: ${TQDM_TELEGRAM_CHAT_ID}].
See `tqdm.auto.tqdm.__init__` for other parameters.
"""
kwargs = kwargs.copy()
self.tgio = TelegramIO(
kwargs.pop("token", getenv("TQDM_TELEGRAM_TOKEN")),
kwargs.pop("chat_id", getenv("TQDM_TELEGRAM_CHAT_ID")),
)
super(tqdm_telegram, self).__init__(*args, **kwargs)
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def clear(self, *args, **kwargs):
super(tqdm_telegram, self).clear(*args, **kwargs)
if not self.disable:
self.tgio.write("")
|
def clear(self, *args, **kwargs):
super(tqdm_telegram, self).clear(*args, **kwargs)
self.tgio.write("")
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
if self.disable:
return super(tqdm_notebook, self).reset(total=total)
_, pbar, _ = self.container.children
pbar.bar_style = ""
if total is not None:
pbar.max = total
if not self.total and self.ncols is None: # no longer unknown total
pbar.layout.width = None # reset width
return super(tqdm_notebook, self).reset(total=total)
|
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
_, pbar, _ = self.container.children
pbar.bar_style = ""
if total is not None:
pbar.max = total
if not self.total and self.ncols is None: # no longer unknown total
pbar.layout.width = None # reset width
return super(tqdm_notebook, self).reset(total=total)
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def unpause(self):
"""Restart tqdm timer from last print time."""
if self.disable:
return
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
|
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.n = 0
if total is not None:
self.total = total
if self.disable:
return
self.last_print_n = 0
self.last_print_t = self.start_t = self._time()
self._ema_dn = EMA(self.smoothing)
self._ema_dt = EMA(self.smoothing)
self._ema_miniters = EMA(self.smoothing)
self.refresh()
|
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
self._ema_dn = EMA(self.smoothing)
self._ema_dt = EMA(self.smoothing)
self._ema_miniters = EMA(self.smoothing)
if total is not None:
self.total = total
self.refresh()
|
https://github.com/tqdm/tqdm/issues/1125
|
haendel:~/projects/telekom/trunk/sandbox/tqdm> python3
Python 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
4.56.0 3.9.1 (default, Jan 8 2021, 17:17:43)
[Clang 12.0.0 (clang-1200.0.32.28)] darwin
t = tqdm.tqdm(total=10, disable=True)
t.reset()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Volumes/projects/telekom/trunk/sandbox/tqdm/tqdm/std.py", line 1348, in reset
self.last_print_t = self.start_t = self._time()
AttributeError: 'tqdm' object has no attribute '_time'
|
AttributeError
|
def format_dict(self):
"""Public API for read-only member access."""
if self.disable and not hasattr(self, "unit"):
return defaultdict(
lambda: None, {"n": self.n, "total": self.total, "elapsed": 0, "unit": "it"}
)
if self.dynamic_ncols:
self.ncols, self.nrows = self.dynamic_ncols(self.fp)
return {
"n": self.n,
"total": self.total,
"elapsed": self._time() - self.start_t if hasattr(self, "start_t") else 0,
"ncols": self.ncols,
"nrows": self.nrows,
"prefix": self.desc,
"ascii": self.ascii,
"unit": self.unit,
"unit_scale": self.unit_scale,
"rate": self._ema_dn() / self._ema_dt() if self._ema_dt() else None,
"bar_format": self.bar_format,
"postfix": self.postfix,
"unit_divisor": self.unit_divisor,
"initial": self.initial,
"colour": self.colour,
}
|
def format_dict(self):
"""Public API for read-only member access."""
if self.dynamic_ncols:
self.ncols, self.nrows = self.dynamic_ncols(self.fp)
ncols, nrows = self.ncols, self.nrows
return {
"n": self.n,
"total": self.total,
"elapsed": self._time() - self.start_t if hasattr(self, "start_t") else 0,
"ncols": ncols,
"nrows": nrows,
"prefix": self.desc,
"ascii": self.ascii,
"unit": self.unit,
"unit_scale": self.unit_scale,
"rate": self._ema_dn() / self._ema_dt() if self._ema_dt() else None,
"bar_format": self.bar_format,
"postfix": self.postfix,
"unit_divisor": self.unit_divisor,
"initial": self.initial,
"colour": self.colour,
}
|
https://github.com/tqdm/tqdm/issues/624
|
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
('4.26.0', '2.7.14 (default, Mar 22 2018, 15:04:47) \n[GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]', 'darwin')
a = tqdm.tqdm([], disable=True)
print a
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/kratsg/.virtualenvs/pyhf/lib/python2.7/site-packages/tqdm/_tqdm.py", line 894, in __repr__
elapsed if elapsed is not None else self._time() - self.start_t,
AttributeError: 'tqdm' object has no attribute '_time'
str(a)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/kratsg/.virtualenvs/pyhf/lib/python2.7/site-packages/tqdm/_tqdm.py", line 894, in __repr__
elapsed if elapsed is not None else self._time() - self.start_t,
AttributeError: 'tqdm' object has no attribute '_time'
dir(a)
['__class__', '__del__', '__delattr__', '__dict__', '__doc__', '__enter__', '__eq__', '__exit__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_comparable', '_decr_instances', '_get_free_pos', '_instances', '_lock', 'clear', 'close', 'disable', 'external_write_mode', 'format_interval', 'format_meter', 'format_sizeof', 'get_lock', 'iterable', 'monitor', 'monitor_interval', 'moveto', 'n', 'pandas', 'pos', 'refresh', 'set_description', 'set_description_str', 'set_lock', 'set_postfix', 'set_postfix_str', 'status_printer', 'total', 'unpause', 'update', 'write']
|
AttributeError
|
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
nrows = self.nrows or 20
if pos >= nrows - 1:
if pos >= nrows:
return False
if msg or msg is None: # override at `nrows - 1`
msg = " ... (more hidden) ..."
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
if pos:
self.moveto(pos)
self.sp(self.__str__() if msg is None else msg)
if pos:
self.moveto(-pos)
return True
|
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
nrows = self.nrows or 20
if pos >= nrows - 1:
if pos >= nrows:
return False
if msg or msg is None: # override at `nrows - 1`
msg = " ... (more hidden) ..."
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
return True
|
https://github.com/tqdm/tqdm/issues/624
|
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
('4.26.0', '2.7.14 (default, Mar 22 2018, 15:04:47) \n[GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]', 'darwin')
a = tqdm.tqdm([], disable=True)
print a
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/kratsg/.virtualenvs/pyhf/lib/python2.7/site-packages/tqdm/_tqdm.py", line 894, in __repr__
elapsed if elapsed is not None else self._time() - self.start_t,
AttributeError: 'tqdm' object has no attribute '_time'
str(a)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/kratsg/.virtualenvs/pyhf/lib/python2.7/site-packages/tqdm/_tqdm.py", line 894, in __repr__
elapsed if elapsed is not None else self._time() - self.start_t,
AttributeError: 'tqdm' object has no attribute '_time'
dir(a)
['__class__', '__del__', '__delattr__', '__dict__', '__doc__', '__enter__', '__eq__', '__exit__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_comparable', '_decr_instances', '_get_free_pos', '_instances', '_lock', 'clear', 'close', 'disable', 'external_write_mode', 'format_interval', 'format_meter', 'format_sizeof', 'get_lock', 'iterable', 'monitor', 'monitor_interval', 'moveto', 'n', 'pandas', 'pos', 'refresh', 'set_description', 'set_description_str', 'set_lock', 'set_postfix', 'set_postfix_str', 'status_printer', 'total', 'unpause', 'update', 'write']
|
AttributeError
|
def __new__(cls, *_, **__):
instance = object.__new__(cls)
with cls.get_lock(): # also constructs lock if non-existent
cls._instances.add(instance)
# create monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn(
"tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning,
stacklevel=2,
)
cls.monitor_interval = 0
return instance
|
def __new__(cls, *_, **__):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, "_instances"):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn(
"tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning,
stacklevel=2,
)
cls.monitor_interval = 0
# Return the instance
return instance
|
https://github.com/tqdm/tqdm/issues/1084
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<timed exec> in <module>
/kaggle/usr/lib/nlp_profiler_class/nlp_profiler_class.py in apply_text_profiling(self, dataframe, text_column, params)
94 """
95
---> 96 return nlp_profiler.apply_text_profiling(dataframe, text_column, params)
/opt/conda/lib/python3.7/site-packages/nlp_profiler/core.py in apply_text_profiling(dataframe, text_column, params)
60 actions_mappings.remove(item)
61
---> 62 apply_profiling_progress_bar = get_progress_bar(actions_mappings)
63 for _, (param, action_description, action_function) in \
64 enumerate(apply_profiling_progress_bar):
/opt/conda/lib/python3.7/site-packages/nlp_profiler/generate_features/parallelisation_methods/__init__.py in get_progress_bar(values)
18
19 def get_progress_bar(values: list) -> tqdm:
---> 20 return tqdm(values, ncols=PROGRESS_BAR_WIDTH)
21
22
/opt/conda/lib/python3.7/site-packages/tqdm/asyncio.py in __new__(cls, *args, **kwargs)
64
65 def __new__(cls, *args, **kwargs):
---> 66 return cls.get_new(super(tqdm_asyncio, cls), std_tqdm, *args, **kwargs)
67
68
AttributeError: type object 'tqdm' has no attribute 'get_new'
|
AttributeError
|
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = _force_encoding(file) if file in (sys.stdout, sys.stderr) else file
fp_flush = getattr(fp, "flush", lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write("\r" + s + (" " * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
|
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, "flush", lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write("\r" + s + (" " * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
|
https://github.com/tqdm/tqdm/issues/127
|
0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 483, in __iter__
1 / avg_time if avg_time else None, bar_format))
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 95, in print_status
fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 6-16: ordinal not in range(128)
|
UnicodeEncodeError
|
def main():
args = argopt(__doc__, version=__version__).parse_args()
if args.debug_trace:
args.debug = "NOTSET"
logging.basicConfig(
level=getattr(logging, args.debug, logging.INFO),
format="%(levelname)s:%(message)s",
)
log = logging.getLogger(__name__)
log.debug(args)
# Get compressed sizes
zips = {}
for fn in args.zipfiles:
info = subprocess.check_output(["7z", "l", fn]).strip()
finfo = RE_SCN.findall(info) # size|compressed|name
# builtin test: last line should be total sizes
log.debug(finfo)
totals = map(int, finfo[-1][:2])
# log.debug(totals)
for s in range(2): # size|compressed totals
totals_s = sum(map(int, (inf[s] for inf in finfo[:-1])))
if totals_s != totals[s]:
log.warn(
"%s: individual total %d != 7z total %d" % (fn, totals_s, totals[s])
)
fcomp = {n: int(c if args.compressed else u) for (u, c, n) in finfo[:-1]}
# log.debug(fcomp)
# zips : {'zipname' : {'filename' : int(size)}}
zips[fn] = fcomp
# Extract
cmd7zx = ["7z", "x", "-bd"]
if args.yes:
cmd7zx += ["-y"]
log.info("Extracting from {:d} file(s)".format(len(zips)))
with tqdm(
total=sum(sum(fcomp.values()) for fcomp in zips.values()),
unit="B",
unit_scale=True,
) as tall:
for fn, fcomp in zips.items():
md, sd = pty.openpty()
ex = subprocess.Popen(
cmd7zx + [fn],
bufsize=1,
stdout=md, # subprocess.PIPE,
stderr=subprocess.STDOUT,
)
os.close(sd)
with io.open(md, mode="rU", buffering=1) as m:
with tqdm(
total=sum(fcomp.values()),
disable=len(zips) < 2,
leave=False,
unit="B",
unit_scale=True,
) as t:
if not hasattr(t, "start_t"): # disabled
t.start_t = tall._time()
while True:
try:
l_raw = m.readline()
except IOError:
break
ln = l_raw.strip()
if ln.startswith("Extracting"):
exname = ln.lstrip("Extracting").lstrip()
s = fcomp.get(exname, 0) # 0 is likely folders
t.update(s)
tall.update(s)
elif ln:
if not any(
ln.startswith(i)
for i in (
"7-Zip ",
"p7zip Version ",
"Everything is Ok",
"Folders: ",
"Files: ",
"Size: ",
"Compressed: ",
)
):
if ln.startswith("Processing archive: "):
if not args.silent:
t.write(
t.format_interval(t.start_t - tall.start_t)
+ " "
+ ln.lstrip("Processing archive: ")
)
else:
t.write(ln)
ex.wait()
|
def main():
args = argopt(__doc__, version=__version__).parse_args()
if args.debug_trace:
args.debug = "NOTSET"
logging.basicConfig(
level=getattr(logging, args.debug, logging.INFO),
format="%(levelname)s:%(message)s",
)
log = logging.getLogger(__name__)
log.debug(args)
# Get compressed sizes
zips = {}
for fn in args.zipfiles:
info = subprocess.check_output(["7z", "l", fn]).strip()
finfo = RE_SCN.findall(info) # size|compressed|name
# builtin test: last line should be total sizes
log.debug(finfo)
totals = map(int, finfo[-1][:2])
# log.debug(totals)
for s in range(2): # size|compressed totals
totals_s = sum(map(int, (inf[s] for inf in finfo[:-1])))
if totals_s != totals[s]:
log.warn(
"%s: individual total %d != 7z total %d" % (fn, totals_s, totals[s])
)
fcomp = dict((n, int(c if args.compressed else u)) for (u, c, n) in finfo[:-1])
# log.debug(fcomp)
# zips : {'zipname' : {'filename' : int(size)}}
zips[fn] = fcomp
# Extract
cmd7zx = ["7z", "x", "-bd"]
if args.yes:
cmd7zx += ["-y"]
log.info("Extracting from {:d} file(s)".format(len(zips)))
with tqdm(
total=sum(sum(fcomp.values()) for fcomp in zips.values()),
unit="B",
unit_scale=True,
) as tall:
for fn, fcomp in zips.items():
md, sd = pty.openpty()
ex = subprocess.Popen(
cmd7zx + [fn],
bufsize=1,
stdout=md, # subprocess.PIPE,
stderr=subprocess.STDOUT,
)
os.close(sd)
with io.open(md, mode="rU", buffering=1) as m:
with tqdm(
total=sum(fcomp.values()),
disable=len(zips) < 2,
leave=False,
unit="B",
unit_scale=True,
) as t:
if not hasattr(t, "start_t"): # disabled
t.start_t = tall._time()
while True:
try:
l_raw = m.readline()
except IOError:
break
ln = l_raw.strip()
if ln.startswith("Extracting"):
exname = ln.lstrip("Extracting").lstrip()
s = fcomp.get(exname, 0) # 0 is likely folders
t.update(s)
tall.update(s)
elif ln:
if not any(
ln.startswith(i)
for i in (
"7-Zip ",
"p7zip Version ",
"Everything is Ok",
"Folders: ",
"Files: ",
"Size: ",
"Compressed: ",
)
):
if ln.startswith("Processing archive: "):
if not args.silent:
t.write(
t.format_interval(t.start_t - tall.start_t)
+ " "
+ ln.lstrip("Processing archive: ")
)
else:
t.write(ln)
ex.wait()
|
https://github.com/tqdm/tqdm/issues/127
|
0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 483, in __iter__
1 / avg_time if avg_time else None, bar_format))
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 95, in print_status
fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 6-16: ordinal not in range(128)
|
UnicodeEncodeError
|
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ", ".join(key + "=" + postfix[key].strip() for key in postfix.keys())
if refresh:
self.refresh()
|
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ", ".join(key + "=" + postfix[key].strip() for key in postfix.keys())
if refresh:
self.refresh()
|
https://github.com/tqdm/tqdm/issues/127
|
0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 483, in __iter__
1 / avg_time if avg_time else None, bar_format))
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 95, in print_status
fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 6-16: ordinal not in range(128)
|
UnicodeEncodeError
|
def __init__(self, callback, stream, method="read"):
"""
Wrap a given `file`-like object's `read()` or `write()` to report
lengths to the given `callback`
"""
super(CallbackIOWrapper, self).__init__(stream)
func = getattr(stream, method)
if method == "write":
@wraps(func)
def write(data, *args, **kwargs):
res = func(data, *args, **kwargs)
callback(len(data))
return res
self.wrapper_setattr("write", write)
elif method == "read":
@wraps(func)
def read(*args, **kwargs):
data = func(*args, **kwargs)
callback(len(data))
return data
self.wrapper_setattr("read", read)
else:
raise KeyError("Can only wrap read/write methods")
|
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError("expected at 1 argument, got %d", len(args))
if not hasattr(self, "_keys"):
self._keys = []
self.update(*args, **kwds)
|
https://github.com/tqdm/tqdm/issues/127
|
0%| | 0/20 [00:00<?, ?it/s]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 483, in __iter__
1 / avg_time if avg_time else None, bar_format))
File "/opt/proj/venv/lib/python2.6/site-packages/tqdm/_tqdm.py", line 95, in print_status
fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 6-16: ordinal not in range(128)
|
UnicodeEncodeError
|
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
cls = type(self)
root_lock = cls.th_lock
if root_lock is not None:
root_lock.acquire()
cls.create_mp_lock()
if root_lock is not None:
root_lock.release()
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
|
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
|
https://github.com/tqdm/tqdm/issues/982
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "tqdm_fail.py", line 11, in race_write
tqdm.write("spam")
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 586, in write
fp.write(end)
File "/usr/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 616, in external_write_mode
cls._lock.release()
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 93, in release
lock.release()
AssertionError: attempt to release recursive lock not owned by thread
|
AssertionError
|
def create_mp_lock(cls):
if not hasattr(cls, "mp_lock"):
try:
from multiprocessing import RLock
cls.mp_lock = RLock()
except (ImportError, OSError): # pragma: no cover
cls.mp_lock = None
|
def create_mp_lock(cls):
if not hasattr(cls, "mp_lock"):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
|
https://github.com/tqdm/tqdm/issues/982
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "tqdm_fail.py", line 11, in race_write
tqdm.write("spam")
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 586, in write
fp.write(end)
File "/usr/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 616, in external_write_mode
cls._lock.release()
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 93, in release
lock.release()
AssertionError: attempt to release recursive lock not owned by thread
|
AssertionError
|
def create_th_lock(cls):
assert hasattr(cls, "th_lock")
warn("create_th_lock not needed anymore", TqdmDeprecationWarning, stacklevel=2)
|
def create_th_lock(cls):
if not hasattr(cls, "th_lock"):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
|
https://github.com/tqdm/tqdm/issues/982
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "tqdm_fail.py", line 11, in race_write
tqdm.write("spam")
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 586, in write
fp.write(end)
File "/usr/lib/python3.7/contextlib.py", line 119, in __exit__
next(self.gen)
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 616, in external_write_mode
cls._lock.release()
File "/home/duck/.local/lib/python3.7/site-packages/tqdm/std.py", line 93, in release
lock.release()
AssertionError: attempt to release recursive lock not owned by thread
|
AssertionError
|
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
if IProgress is None: # #187 #451 #558 #872
raise ImportError(
"IProgress not found. Please update jupyter and ipywidgets."
" See https://ipywidgets.readthedocs.io/en/stable"
"/user_install.html"
)
if total:
pbar = IProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = "info"
if desc:
pbar.description = desc
if IPYW >= 7:
pbar.style.description_width = "initial"
# Prepare status text
ptext = HTML()
# Only way to place text to the right of the bar is to use a container
container = HBox(children=[pbar, ptext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
try:
if int(ncols) > 0: # isnumeric and positive
ncols += "px"
except ValueError:
pass
pbar.layout.flex = "2"
container.layout.width = ncols
container.layout.display = "inline-flex"
container.layout.flex_flow = "row wrap"
display(container)
return container
|
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
try:
if total:
pbar = IProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = "info"
except NameError:
# #187 #451 #558
raise ImportError(
"FloatProgress not found. Please update jupyter and ipywidgets."
" See https://ipywidgets.readthedocs.io/en/stable"
"/user_install.html"
)
if desc:
pbar.description = desc
if IPYW >= 7:
pbar.style.description_width = "initial"
# Prepare status text
ptext = HTML()
# Only way to place text to the right of the bar is to use a container
container = HBox(children=[pbar, ptext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
try:
if int(ncols) > 0: # isnumeric and positive
ncols += "px"
except ValueError:
pass
pbar.layout.flex = "2"
container.layout.width = ncols
container.layout.display = "inline-flex"
container.layout.flex_flow = "row wrap"
display(container)
return container
|
https://github.com/tqdm/tqdm/issues/872
|
NameError Traceback (most recent call last)
~\Anaconda3\envs\py3_TF2.0\lib\site-packages\tqdm\notebook.py in status_printer(_, total, desc, ncols)
95 try:
---> 96 if total:
97 pbar = IProgress(min=0, max=total)
NameError: name 'IProgress' is not defined
|
NameError
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import (
DataFrameGroupBy,
SeriesGroupBy,
GroupBy,
PanelGroupBy,
)
except ImportError:
from pandas.core.groupby import (
DataFrameGroupBy,
SeriesGroupBy,
GroupBy,
PanelGroupBy,
)
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy
from pandas.core.groupby.groupby import SeriesGroupBy
from pandas.core.groupby.groupby import GroupBy
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
https://github.com/tqdm/tqdm/issues/555
|
from tqdm import tqdm
tqdm.pandas()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/username/.conda/envs/py36/lib/python3.6/site-packages/tqdm/_tqdm.py", line 545, in pandas
from pandas.core.groupby import PanelGroupBy
ImportError: cannot import name 'PanelGroupBy'
|
ImportError
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=1.0.0
from pandas.core.window.rolling import _Rolling_and_Expanding
except ImportError:
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
) # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
) # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
https://github.com/tqdm/tqdm/issues/555
|
from tqdm import tqdm
tqdm.pandas()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/username/.conda/envs/py36/lib/python3.6/site-packages/tqdm/_tqdm.py", line 545, in pandas
from pandas.core.groupby import PanelGroupBy
ImportError: cannot import name 'PanelGroupBy'
|
ImportError
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
) # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
) # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
func = df._is_builtin_func(func)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
https://github.com/tqdm/tqdm/issues/862
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-18-9d87c7bad499> in <module>
1 l = []
2 tqdm.tqdm.pandas(desc="Processing")
----> 3 df["title"].progress_apply(l.extend)
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/tqdm/std.py in inner(df, func, *args, **kwargs)
733 fp_write=getattr(t.fp, 'write', sys.stderr.write))
734
--> 735 func = df._is_builtin_func(func)
736
737 # Define bar updating wrapper
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/pandas/core/base.py in _is_builtin_func(self, arg)
664 otherwise return the arg
665 """
--> 666 return self._builtin_table.get(arg, arg)
667
668
TypeError: unhashable type: 'list'
|
TypeError
|
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
|
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
func = df._is_builtin_func(func)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
|
https://github.com/tqdm/tqdm/issues/862
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-18-9d87c7bad499> in <module>
1 l = []
2 tqdm.tqdm.pandas(desc="Processing")
----> 3 df["title"].progress_apply(l.extend)
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/tqdm/std.py in inner(df, func, *args, **kwargs)
733 fp_write=getattr(t.fp, 'write', sys.stderr.write))
734
--> 735 func = df._is_builtin_func(func)
736
737 # Define bar updating wrapper
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/pandas/core/base.py in _is_builtin_func(self, arg)
664 otherwise return the arg
665 """
--> 666 return self._builtin_table.get(arg, arg)
667
668
TypeError: unhashable type: 'list'
|
TypeError
|
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
|
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
func = df._is_builtin_func(func)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
|
https://github.com/tqdm/tqdm/issues/862
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-18-9d87c7bad499> in <module>
1 l = []
2 tqdm.tqdm.pandas(desc="Processing")
----> 3 df["title"].progress_apply(l.extend)
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/tqdm/std.py in inner(df, func, *args, **kwargs)
733 fp_write=getattr(t.fp, 'write', sys.stderr.write))
734
--> 735 func = df._is_builtin_func(func)
736
737 # Define bar updating wrapper
~/.anaconda3/envs/pytorch/lib/python3.7/site-packages/pandas/core/base.py in _is_builtin_func(self, arg)
664 otherwise return the arg
665 """
--> 666 return self._builtin_table.get(arg, arg)
667
668
TypeError: unhashable type: 'list'
|
TypeError
|
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
|
def __init__(self, frac, default_len=10, charset=UTF):
assert 0 <= frac <= 1
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
|
https://github.com/tqdm/tqdm/issues/859
|
4.40.0 3.6.9 |Anaconda, Inc.| (default, Jul 30 2019, 19:07:31)
[GCC 7.3.0] linux
0%| | 0/9.6 [00:00<?, ?it/s\
]Traceback (most recent call last):
File "tqdm_test.py", line 3, in <module>
for i in tqdm.tqdm(iterable=range(10), total=9.6):
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 1150, in __iter__
self.close()
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 1261, in close
self.display(pos=0)
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 1428, in display
self.sp(self.__repr__() if msg is None else msg)
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 1058, in __repr__
return self.format_meter(**self.format_dict)
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 482, in format_meter
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
File "/home/aronnem/miniconda3/envs/tqdm_test/lib/python3.6/site-packages/tqdm/std.py", line 146, in __init__
assert 0 <= frac <= 1
AssertionError
|
AssertionError
|
def format_meter(
n,
total,
elapsed,
ncols=None,
prefix="",
ascii=False,
unit="it",
unit_scale=False,
rate=None,
bar_format=None,
postfix=None,
unit_divisor=1000,
**extra_kwargs,
):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = (
(
(format_sizeof(rate) if unit_scale else "{0:5.2f}".format(rate))
if rate
else "?"
)
+ unit
+ "/s"
)
rate_inv_fmt = (
(
(format_sizeof(inv_rate) if unit_scale else "{0:5.2f}".format(inv_rate))
if inv_rate
else "?"
)
+ "s/"
+ unit
)
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = (
format_sizeof(total, divisor=unit_divisor) if total is not None else "?"
)
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else "?"
try:
postfix = ", " + postfix if postfix else ""
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else "?"
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = prefix[-2:] == ": "
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ""
r_bar = "| {0}/{1} [{2}<{3}, {4}{5}]".format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix
)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n,
n_fmt=n_fmt,
total=total,
total_fmt=total_fmt,
elapsed=elapsed_str,
elapsed_s=elapsed,
ncols=ncols,
desc=prefix or "",
unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt,
rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt,
rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix,
unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str,
remaining_s=remaining,
l_bar=l_bar,
r_bar=r_bar,
**extra_kwargs,
)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += "{0:3.0f}%|".format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", "")
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
if _is_ascii(bar_format) and any(
not _is_ascii(i) for i in format_dict.values()
):
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += "|"
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.BLANK,
)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else "") + "{0}{1} [{2}, {3}{4}]".format(
n_fmt, unit, elapsed_str, rate_fmt, postfix
)
|
def format_meter(
n,
total,
elapsed,
ncols=None,
prefix="",
ascii=False,
unit="it",
unit_scale=False,
rate=None,
bar_format=None,
postfix=None,
unit_divisor=1000,
**extra_kwargs,
):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = (
(
(format_sizeof(rate) if unit_scale else "{0:5.2f}".format(rate))
if rate
else "?"
)
+ unit
+ "/s"
)
rate_inv_fmt = (
(
(format_sizeof(inv_rate) if unit_scale else "{0:5.2f}".format(inv_rate))
if inv_rate
else "?"
)
+ "s/"
+ unit
)
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = (
format_sizeof(total, divisor=unit_divisor) if total is not None else "?"
)
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else "?"
try:
postfix = ", " + postfix if postfix else ""
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else "?"
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = prefix[-2:] == ": "
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ""
r_bar = "| {0}/{1} [{2}<{3}, {4}{5}]".format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix
)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n,
n_fmt=n_fmt,
total=total,
total_fmt=total_fmt,
elapsed=elapsed_str,
elapsed_s=elapsed,
ncols=ncols,
desc=prefix or "",
unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt,
rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt,
rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix,
unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str,
remaining_s=remaining,
l_bar=l_bar,
r_bar=r_bar,
**extra_kwargs,
)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += "{0:3.0f}%|".format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", "")
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
if _is_ascii(bar_format) and any(
not _is_ascii(i) for i in format_dict.values()
):
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += "|"
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.BLANK,
)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else "") + "{0}{1} [{2}, {3}{4}]".format(
n_fmt, unit, elapsed_str, rate_fmt, postfix
)
|
https://github.com/tqdm/tqdm/issues/849
|
Progress: 0% 0/1.0 [00:00<?, ?it/s]Traceback (most recent call last):
File "tqdm_test.py", line 13, in <module>
pbar.close()
File "/home/dan/software/anaconda3/envs/ame/lib/python3.7/site-packages/tqdm/std.py", line 1254, in close
self.display(pos=0)
File "/home/dan/software/anaconda3/envs/ame/lib/python3.7/site-packages/tqdm/std.py", line 1421, in display
self.sp(self.__repr__() if msg is None else msg)
File "/home/dan/software/anaconda3/envs/ame/lib/python3.7/site-packages/tqdm/std.py", line 1052, in __repr__
return self.format_meter(**self.format_dict)
File "/home/dan/software/anaconda3/envs/ame/lib/python3.7/site-packages/tqdm/std.py", line 489, in format_meter
nobar = bar_format.format(bar=full_bar, **format_dict)
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def format_meter(
n,
total,
elapsed,
ncols=None,
prefix="",
ascii=False,
unit="it",
unit_scale=False,
rate=None,
bar_format=None,
postfix=None,
unit_divisor=1000,
**extra_kwargs,
):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = (
(
(format_sizeof(rate) if unit_scale else "{0:5.2f}".format(rate))
if rate
else "?"
)
+ unit
+ "/s"
)
rate_inv_fmt = (
(
(format_sizeof(inv_rate) if unit_scale else "{0:5.2f}".format(inv_rate))
if inv_rate
else "?"
)
+ "s/"
+ unit
)
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = (
format_sizeof(total, divisor=unit_divisor) if total is not None else "?"
)
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else "?"
try:
postfix = ", " + postfix if postfix else ""
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else "?"
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = prefix[-2:] == ": "
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ""
r_bar = "| {0}/{1} [{2}<{3}, {4}{5}]".format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix
)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n,
n_fmt=n_fmt,
total=total,
total_fmt=total_fmt,
elapsed=elapsed_str,
elapsed_s=elapsed,
ncols=ncols,
desc=prefix or "",
unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt,
rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt,
rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix,
unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str,
remaining_s=remaining,
l_bar=l_bar,
r_bar=r_bar,
**extra_kwargs,
)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += "{0:3.0f}%|".format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", "")
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
if _is_ascii(bar_format) and any(
not _is_ascii(i) for i in format_dict.values()
):
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += "|"
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.BLANK,
)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else "") + "{0}{1} [{2}, {3}{4}]".format(
n_fmt, unit, elapsed_str, rate_fmt, postfix
)
|
def format_meter(
n,
total,
elapsed,
ncols=None,
prefix="",
ascii=False,
unit="it",
unit_scale=False,
rate=None,
bar_format=None,
postfix=None,
unit_divisor=1000,
**extra_kwargs,
):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = (
(
(format_sizeof(rate) if unit_scale else "{0:5.2f}".format(rate))
if rate
else "?"
)
+ unit
+ "/s"
)
rate_inv_fmt = (
(
(format_sizeof(inv_rate) if unit_scale else "{0:5.2f}".format(inv_rate))
if inv_rate
else "?"
)
+ "s/"
+ unit
)
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = (
format_sizeof(total, divisor=unit_divisor) if total is not None else "?"
)
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else "?"
try:
postfix = ", " + postfix if postfix else ""
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else "?"
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = prefix[-2:] == ": "
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ""
r_bar = "| {0}/{1} [{2}<{3}, {4}{5}]".format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix
)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n,
n_fmt=n_fmt,
total=total,
total_fmt=total_fmt,
elapsed=elapsed_str,
elapsed_s=elapsed,
ncols=ncols,
desc=prefix or "",
unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt,
rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt,
rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix,
unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str,
remaining_s=remaining,
l_bar=l_bar,
r_bar=r_bar,
**extra_kwargs,
)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += "{0:3.0f}%|".format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", "")
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += "|"
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub("", nobar))) if ncols else 10,
charset=Bar.BLANK,
)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else "") + "{0}{1} [{2}, {3}{4}]".format(
n_fmt, unit, elapsed_str, rate_fmt, postfix
)
|
https://github.com/tqdm/tqdm/issues/851
|
import tqdm, sys
print(tqdm.__version__, sys.version, sys.platform)
('4.34.0', '2.7.15+ (default, Oct 7 2019, 17:39:04) \n[GCC 7.4.0]', 'linux2')
pbar = tqdm.tqdm(total=10, leave=False)
0%| | 0/10 [00:00<?, ?it/s]
pbar.set_description(u"áéíóú")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "tqdm/_tqdm.py", line 1289, in set_description
self.refresh()
File "tqdm/_tqdm.py", line 1251, in refresh
self.display()
File "tqdm/_tqdm.py", line 1374, in display
self.sp(self.__repr__() if msg is None else msg)
File "tqdm/_tqdm.py", line 1020, in __repr__
return self.format_meter(**self.format_dict)
File "tqdm/_tqdm.py", line 462, in format_meter
nobar = bar_format.format(bar=full_bar, **format_dict)
UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-4: ordinal not in range(128)
|
UnicodeEncodeError
|
def display(
self,
msg=None,
pos=None,
# additional signals
close=False,
bar_style=None,
):
# Note: contrary to native tqdm, msg='' does NOT clear bar
# goal is to keep all infos if error happens so user knows
# at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
if not msg and not close:
msg = self.__repr__()
pbar, ptext = self.container.children
pbar.value = self.n
if msg:
# html escape special characters (like '&')
if "<bar/>" in msg:
left, right = map(escape, msg.split("<bar/>", 1))
else:
left, right = "", escape(msg)
# remove inesthetical pipes
if left and left[-1] == "|":
left = left[:-1]
if right and right[0] == "|":
right = right[1:]
# Update description
pbar.description = left
if IPYW >= 7:
pbar.style.description_width = "initial"
# never clear the bar (signal: msg='')
if right:
ptext.value = right
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overridden by
# success because the bar gets closed after the error...
if not (pbar.bar_style == "danger" and bar_style == "success"):
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != "danger": # hide only if no error
try:
self.container.close()
except AttributeError:
self.container.visible = False
|
def display(
self,
msg=None,
pos=None,
# additional signals
close=False,
bar_style=None,
):
# Note: contrary to native tqdm, msg='' does NOT clear bar
# goal is to keep all infos if error happens so user knows
# at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
# Update description
if self.desc:
pbar.description = self.desc
self.desc = None # trick to place description before the bar
if IPYW >= 7:
pbar.style.description_width = "initial"
if not msg and not close:
msg = self.__repr__()
pbar, ptext = self.container.children
# Get current iteration value from format_meter string
if self.total:
# n = None
if msg:
npos = msg.find(r"/|/") # cause we use bar_format=r'{n}|...'
# Check that n can be found in msg (else n > total)
if npos >= 0:
n = float(msg[:npos]) # get n from string
msg = msg[npos + 3 :] # remove from string
# Update bar with current n value
if n is not None:
pbar.value = n
# Print stats
if msg: # never clear the bar (signal: msg='')
msg = msg.replace("||", "") # remove inesthetical pipes
msg = escape(msg) # html escape special characters (like '?')
ptext.value = msg
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overridden by
# success because the bar gets closed after the error...
if not (pbar.bar_style == "danger" and bar_style == "success"):
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != "danger": # hide only if no error
try:
self.container.close()
except AttributeError:
self.container.visible = False
|
https://github.com/tqdm/tqdm/issues/594
|
for _ in tqdm_notebook(range(10), ncols='400px', bar_format='{bar}'):
sleep(0.1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-aced8917afa9> in <module>()
1 # px ncols
----> 2 for _ in tqdm_notebook(range(10),ncols='400px', bar_format='{bar}'): #400px
3 sleep(0.1)
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/__init__.py in tqdm_notebook(*args, **kwargs)
23 """See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
24 from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
---> 25 return _tqdm_notebook(*args, **kwargs)
26
27
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm_notebook.py in __init__(self, *args, **kwargs)
203 # Print initial bar state
204 if not self.disable:
--> 205 self.sp(self.__repr__()) # same as self.refresh without clearing
206
207 def __iter__(self, *args, **kwargs):
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm.py in __repr__(self, elapsed)
890 self.desc, self.ascii, self.unit,
891 self.unit_scale, 1 / self.avg_time if self.avg_time else None,
--> 892 self.bar_format, self.postfix, self.unit_divisor)
893
894 @property
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm.py in format_meter(n, total, elapsed, ncols, prefix, ascii, unit, unit_scale, rate, bar_format, postfix, unit_divisor)
363 # Formatting progress bar
364 # space available for bar's display
--> 365 N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
366 else 10
367
TypeError: unsupported operand type(s) for -: 'str' and 'int'
|
TypeError
|
def __init__(self, *args, **kwargs):
# Setup default output
if kwargs.get("file", sys.stderr) is sys.stderr:
kwargs["file"] = sys.stdout # avoid the red block in IPython
# Initialize parent class + avoid printing by using gui=True
kwargs["gui"] = True
kwargs.setdefault("bar_format", "{l_bar}{bar}{r_bar}")
kwargs["bar_format"] = kwargs["bar_format"].replace("{bar}", "<bar/>")
super(tqdm_notebook, self).__init__(*args, **kwargs)
if self.disable or not kwargs["gui"]:
return
# Get bar width
self.ncols = "100%" if self.dynamic_ncols else kwargs.get("ncols", None)
# Replace with IPython progress bar display (with correct total)
unit_scale = 1 if self.unit_scale is True else self.unit_scale or 1
total = self.total * unit_scale if self.total else self.total
self.container = self.status_printer(self.fp, total, self.desc, self.ncols)
self.sp = self.display
# Print initial bar state
if not self.disable:
self.display()
|
def __init__(self, *args, **kwargs):
# Setup default output
if kwargs.get("file", sys.stderr) is sys.stderr:
kwargs["file"] = sys.stdout # avoid the red block in IPython
# Remove the bar from the printed string, only print stats
if not kwargs.get("bar_format", None):
kwargs["bar_format"] = r"{n}/|/{l_bar}{r_bar}"
# Initialize parent class + avoid printing by using gui=True
kwargs["gui"] = True
super(tqdm_notebook, self).__init__(*args, **kwargs)
if self.disable or not kwargs["gui"]:
return
# Get bar width
self.ncols = "100%" if self.dynamic_ncols else kwargs.get("ncols", None)
# Replace with IPython progress bar display (with correct total)
unit_scale = 1 if self.unit_scale is True else self.unit_scale or 1
total = self.total * unit_scale if self.total else self.total
self.container = self.status_printer(self.fp, total, self.desc, self.ncols)
self.sp = self.display
# Print initial bar state
if not self.disable:
self.display()
|
https://github.com/tqdm/tqdm/issues/594
|
for _ in tqdm_notebook(range(10), ncols='400px', bar_format='{bar}'):
sleep(0.1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-aced8917afa9> in <module>()
1 # px ncols
----> 2 for _ in tqdm_notebook(range(10),ncols='400px', bar_format='{bar}'): #400px
3 sleep(0.1)
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/__init__.py in tqdm_notebook(*args, **kwargs)
23 """See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
24 from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
---> 25 return _tqdm_notebook(*args, **kwargs)
26
27
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm_notebook.py in __init__(self, *args, **kwargs)
203 # Print initial bar state
204 if not self.disable:
--> 205 self.sp(self.__repr__()) # same as self.refresh without clearing
206
207 def __iter__(self, *args, **kwargs):
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm.py in __repr__(self, elapsed)
890 self.desc, self.ascii, self.unit,
891 self.unit_scale, 1 / self.avg_time if self.avg_time else None,
--> 892 self.bar_format, self.postfix, self.unit_divisor)
893
894 @property
/usr/local/anaconda/lib/python3.6/site-packages/tqdm/_tqdm.py in format_meter(n, total, elapsed, ncols, prefix, ascii, unit, unit_scale, rate, bar_format, postfix, unit_divisor)
363 # Formatting progress bar
364 # space available for bar's display
--> 365 N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
366 else 10
367
TypeError: unsupported operand type(s) for -: 'str' and 'int'
|
TypeError
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
) # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
A new instance will be create every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
try:
# pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try:
# pandas>=0.23.0
from pandas.core.groupby.groupby import (
DataFrameGroupBy,
SeriesGroupBy,
GroupBy,
PanelGroupBy,
)
except ImportError:
from pandas.core.groupby import (
DataFrameGroupBy,
SeriesGroupBy,
GroupBy,
PanelGroupBy,
)
deprecated_t = [tkwargs.pop("deprecated_t", None)]
def inner_generator(df_function="apply"):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, "ngroups", None))
if total is None: # not grouped
if df_function == "applymap":
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or not isinstance(
df, _Rolling_and_Expanding
):
# DataFrame or Panel
axis = kwargs.get("axis", 0)
if axis == "index":
axis = 0
elif axis == "columns":
axis = 1
# when axis=0, total is shape[axis1]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally"
+ " not supported by"
+ " `(DataFrame|Series|GroupBy).progress_apply`."
+ " Use keyword arguments instead.",
fp_write=getattr(t.fp, "write", sys.stderr.write),
)
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator("map")
SeriesGroupBy.progress_map = inner_generator("map")
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator("applymap")
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator("aggregate")
GroupBy.progress_transform = inner_generator("transform")
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
|
https://github.com/tqdm/tqdm/issues/780
|
Traceback (most recent call last):
File "/Users/martin/anaconda3/envs/momepy37/lib/python3.7/site-packages/tqdm/_tqdm.py", line 613, in pandas
from pandas.core.groupby.groupby import DataFrameGroupBy, \
ImportError: cannot import name 'DataFrameGroupBy' from 'pandas.core.groupby.groupby' (/Users/martin/anaconda3/envs/momepy37/lib/python3.7/site-packages/pandas/core/groupby/groupby.py)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/martin/anaconda3/envs/momepy37/lib/python3.7/site-packages/tqdm/_tqdm.py", line 616, in pandas
from pandas.core.groupby import DataFrameGroupBy, \
ImportError: cannot import name 'PanelGroupBy' from 'pandas.core.groupby' (/Users/martin/anaconda3/envs/momepy37/lib/python3.7/site-packages/pandas/core/groupby/__init__.py)
|
ImportError
|
def __init__(
self,
iterable=None,
desc=None,
total=None,
leave=True,
file=None,
ncols=None,
mininterval=0.1,
maxinterval=10.0,
miniters=None,
ascii=None,
disable=False,
unit="it",
unit_scale=False,
dynamic_ncols=False,
smoothing=0.3,
bar_format=None,
initial=0,
position=None,
postfix=None,
unit_divisor=1000,
write_bytes=None,
gui=False,
**kwargs,
):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, "encoding", None) or "utf-8"
)
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (
TqdmDeprecationWarning(
dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""),
fp_write=getattr(file, "write", sys.stderr.write),
)
if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs))
)
# Preprocess the arguments
if (
(ncols is None) and (file in (sys.stderr, sys.stdout))
) or dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ""
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
|
def __init__(
self,
iterable=None,
desc=None,
total=None,
leave=True,
file=None,
ncols=None,
mininterval=0.1,
maxinterval=10.0,
miniters=None,
ascii=None,
disable=False,
unit="it",
unit_scale=False,
dynamic_ncols=False,
smoothing=0.3,
bar_format=None,
initial=0,
position=None,
postfix=None,
unit_divisor=1000,
write_bytes=None,
gui=False,
**kwargs,
):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, elapsed_s, remaining,
remaining_s, desc, postfix, unit.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(file, encoding=getattr(file, "encoding", "utf-8"))
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
from textwrap import dedent
raise (
TqdmDeprecationWarning(
dedent("""\
`nested` is deprecated and automated.
Use `position` instead for manual control.
"""),
fp_write=getattr(file, "write", sys.stderr.write),
)
if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs))
)
# Preprocess the arguments
if (
(ncols is None) and (file in (sys.stderr, sys.stdout))
) or dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ""
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
self.display()
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
|
https://github.com/tqdm/tqdm/issues/673
|
Traceback (most recent call last):
File "./patcher/env/bin/autopatch", line 11, in <module>
load_entry_point('PearPatcher==0.1', 'console_scripts', 'autopatch')()
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/commandlineinterface.py", line 40, in main
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/util.py", line 58, in tracedFunctionWrapper
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/commandlineinterface.py", line 64, in run
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/util.py", line 58, in tracedFunctionWrapper
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/commandlineinterface.py", line 162, in patch_action
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/swiftscanner.py", line 52, in create_patches
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/util.py", line 58, in tracedFunctionWrapper
File "build/bdist.macosx-10.13-x86_64/egg/pearpatcher/swiftscanner.py", line 61, in scan
File "./patcher/env/lib/python2.7/site-packages/tqdm-4.31.0-py2.7.egg/tqdm/_tqdm.py", line 945, in __init__
self.display()
File "./patcher/env/lib/python2.7/site-packages/tqdm-4.31.0-py2.7.egg/tqdm/_tqdm.py", line 1315, in display
self.sp(self.__repr__() if msg is None else msg)
File "./patcher/env/lib/python2.7/site-packages/tqdm-4.31.0-py2.7.egg/tqdm/_tqdm.py", line 250, in print_status
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
File "./patcher/env/lib/python2.7/site-packages/tqdm-4.31.0-py2.7.egg/tqdm/_tqdm.py", line 243, in fp_write
fp.write(_unicode(s))
File "./patcher/env/lib/python2.7/site-packages/tqdm-4.31.0-py2.7.egg/tqdm/_utils.py", line 160, in write
self, 'encoding')))
TypeError: encode() argument 1 must be string, not None
Exception TypeError: TypeError('encode() argument 1 must be string, not None',) in <bound method tqdm.__del__ of 0%| | 0/11698 [00:00<?, ?it/s]> ignored
|
TypeError
|
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
from warnings import warn
warn(
"tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning,
)
cls.monitor_interval = 0
# Return the instance
return instance
|
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
from warnings import warn
warn(
"tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
RuntimeWarning,
)
cls.monitor_interval = 0
# Return the instance
return instance
|
https://github.com/tqdm/tqdm/issues/522
|
[s1758208@login04(eddie) ~]$ conda create -n test
Solving environment: done
## Package Plan ##
environment location: /exports/eddie/scratch/s1758208/minitest/envs/test
Proceed ([y]/n)? y
Preparing transaction: done
Verifying transaction: done
Executing transaction: done
#
# To activate this environment, use
#
# $ conda activate test
#
# To deactivate an active environment, use
#
# $ conda deactivate
[s1758208@login04(eddie) ~]$ conda activate test
(test) [s1758208@login04(eddie) ~]$ conda install python
Solving environment: done
## Package Plan ##
environment location: /exports/eddie/scratch/s1758208/minitest/envs/test
added / updated specs:
- python
The following packages will be downloaded:
package | build
---------------------------|-----------------
setuptools-38.5.1 | py36_0 525 KB
libstdcxx-ng-7.2.0 | hdf63c60_3 2.5 MB
libgcc-ng-7.2.0 | hdf63c60_3 6.1 MB
pip-9.0.1 | py36_5 2.2 MB
python-3.6.4 | hc3d631a_3 29.1 MB
------------------------------------------------------------
Total: 40.4 MB
The following NEW packages will be INSTALLED:
ca-certificates: 2017.08.26-h1d4fec5_0
certifi: 2018.1.18-py36_0
libedit: 3.1-heed3624_0
libffi: 3.2.1-hd88cf55_4
libgcc-ng: 7.2.0-hdf63c60_3
libstdcxx-ng: 7.2.0-hdf63c60_3
ncurses: 6.0-h9df7e31_2
openssl: 1.0.2n-hb7f436b_0
pip: 9.0.1-py36_5
python: 3.6.4-hc3d631a_3
readline: 7.0-ha6073c6_4
setuptools: 38.5.1-py36_0
sqlite: 3.22.0-h1bed415_0
tk: 8.6.7-hc745277_3
wheel: 0.30.0-py36hfd4bba0_1
xz: 5.2.3-h55aa19d_2
zlib: 1.2.11-ha838bed_2
Proceed ([y]/n)? y
Downloading and Extracting Packages
setuptools 38.5.1: ##################################################### | 100%
libstdcxx-ng 7.2.0: #################################################### | 100%
# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<
Traceback (most recent call last):
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/exceptions.py", line 789, in __call__
return func(*args, **kwargs)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/main.py", line 78, in _main
exit_code = do_call(args, p)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/conda_argparse.py", line 77, in do_call
exit_code = getattr(module, func_name)(args, parser)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/main_install.py", line 11, in execute
install(args, parser, 'install')
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/install.py", line 255, in install
handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/install.py", line 281, in handle_txn
progressive_fetch_extract.execute()
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/core/package_cache.py", line 584, in execute
exc = self._execute_actions(prec_or_spec, prec_actions)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/core/package_cache.py", line 599, in _execute_actions
progress_bar = ProgressBar(desc, not context.verbosity and not context.quiet, context.json)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/common/io.py", line 390, in __init__
self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/_vendor/tqdm/_tqdm.py", line 388, in __new__
cls.monitor = TMonitor(cls, cls.monitor_interval)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/_vendor/tqdm/_tqdm.py", line 83, in __init__
self.start()
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/threading.py", line 846, in start
_start_new_thread(self._bootstrap, ())
RuntimeError: can't start new thread
|
RuntimeError
|
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
# sys.stderr.write(str(e))
# sys.stderr.write("\ntqdm:disabling monitor support"
# " (monitor_interval = 0)\n")
cls.monitor_interval = 0
# Return the instance
return instance
|
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()):
cls.monitor = TMonitor(cls, cls.monitor_interval)
# Return the instance
return instance
|
https://github.com/tqdm/tqdm/issues/522
|
[s1758208@login04(eddie) ~]$ conda create -n test
Solving environment: done
## Package Plan ##
environment location: /exports/eddie/scratch/s1758208/minitest/envs/test
Proceed ([y]/n)? y
Preparing transaction: done
Verifying transaction: done
Executing transaction: done
#
# To activate this environment, use
#
# $ conda activate test
#
# To deactivate an active environment, use
#
# $ conda deactivate
[s1758208@login04(eddie) ~]$ conda activate test
(test) [s1758208@login04(eddie) ~]$ conda install python
Solving environment: done
## Package Plan ##
environment location: /exports/eddie/scratch/s1758208/minitest/envs/test
added / updated specs:
- python
The following packages will be downloaded:
package | build
---------------------------|-----------------
setuptools-38.5.1 | py36_0 525 KB
libstdcxx-ng-7.2.0 | hdf63c60_3 2.5 MB
libgcc-ng-7.2.0 | hdf63c60_3 6.1 MB
pip-9.0.1 | py36_5 2.2 MB
python-3.6.4 | hc3d631a_3 29.1 MB
------------------------------------------------------------
Total: 40.4 MB
The following NEW packages will be INSTALLED:
ca-certificates: 2017.08.26-h1d4fec5_0
certifi: 2018.1.18-py36_0
libedit: 3.1-heed3624_0
libffi: 3.2.1-hd88cf55_4
libgcc-ng: 7.2.0-hdf63c60_3
libstdcxx-ng: 7.2.0-hdf63c60_3
ncurses: 6.0-h9df7e31_2
openssl: 1.0.2n-hb7f436b_0
pip: 9.0.1-py36_5
python: 3.6.4-hc3d631a_3
readline: 7.0-ha6073c6_4
setuptools: 38.5.1-py36_0
sqlite: 3.22.0-h1bed415_0
tk: 8.6.7-hc745277_3
wheel: 0.30.0-py36hfd4bba0_1
xz: 5.2.3-h55aa19d_2
zlib: 1.2.11-ha838bed_2
Proceed ([y]/n)? y
Downloading and Extracting Packages
setuptools 38.5.1: ##################################################### | 100%
libstdcxx-ng 7.2.0: #################################################### | 100%
# >>>>>>>>>>>>>>>>>>>>>> ERROR REPORT <<<<<<<<<<<<<<<<<<<<<<
Traceback (most recent call last):
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/exceptions.py", line 789, in __call__
return func(*args, **kwargs)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/main.py", line 78, in _main
exit_code = do_call(args, p)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/conda_argparse.py", line 77, in do_call
exit_code = getattr(module, func_name)(args, parser)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/main_install.py", line 11, in execute
install(args, parser, 'install')
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/install.py", line 255, in install
handle_txn(progressive_fetch_extract, unlink_link_transaction, prefix, args, newenv)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/cli/install.py", line 281, in handle_txn
progressive_fetch_extract.execute()
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/core/package_cache.py", line 584, in execute
exc = self._execute_actions(prec_or_spec, prec_actions)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/core/package_cache.py", line 599, in _execute_actions
progress_bar = ProgressBar(desc, not context.verbosity and not context.quiet, context.json)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/common/io.py", line 390, in __init__
self.pbar = tqdm(desc=description, bar_format=bar_format, ascii=True, total=1)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/_vendor/tqdm/_tqdm.py", line 388, in __new__
cls.monitor = TMonitor(cls, cls.monitor_interval)
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/site-packages/conda/_vendor/tqdm/_tqdm.py", line 83, in __init__
self.start()
File "/exports/eddie/scratch/s1758208/minitest/lib/python3.6/threading.py", line 846, in start
_start_new_thread(self._bootstrap, ())
RuntimeError: can't start new thread
|
RuntimeError
|
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file
# Clear all bars
inst_cleared = []
for inst in getattr(cls, "_instances", []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)):
inst.clear()
inst_cleared.append(inst)
# Write the message
fp.write(s)
fp.write(end)
# Force refresh display of bars we cleared
for inst in inst_cleared:
# Avoid racing conditions by checking that the instance started
if hasattr(inst, "started") and inst.started:
inst.refresh()
|
def write(cls, s, file=sys.stdout, end="\n"):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file
# Clear all bars
inst_cleared = []
for inst in getattr(cls, "_instances", []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)):
inst.clear()
inst_cleared.append(inst)
# Write the message
fp.write(s)
fp.write(end)
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh()
|
https://github.com/tqdm/tqdm/issues/268
|
Traceback (most recent call last):
File "x.py", line 273, in train
bar.update(0)
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 808, in update
if self.avg_time is None \
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def __init__(
self,
iterable=None,
desc=None,
total=None,
leave=True,
file=sys.stderr,
ncols=None,
mininterval=0.1,
maxinterval=10.0,
miniters=None,
ascii=None,
disable=False,
unit="it",
unit_scale=False,
dynamic_ncols=False,
smoothing=0.3,
bar_format=None,
initial=0,
position=None,
gui=False,
**kwargs,
):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations.
If specified, will set `mininterval` to 0.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"""\
`nested` is deprecated and automated. Use position instead for manual control.
""",
fp_write=getattr(file, "write", sys.stderr.write),
)
if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs))
)
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if (
(ncols is None) and (file in (sys.stderr, sys.stdout))
) or dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else:
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ": " if desc else ""
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.pos = self._get_free_pos(self) if position is None else position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(
self.format_meter(
self.n,
total,
0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc,
ascii,
unit,
unit_scale,
None,
bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.start_t = self.last_print_t = self._time()
# Avoid race conditions by setting a flag at the very end of init
self.started = True
|
def __init__(
self,
iterable=None,
desc=None,
total=None,
leave=True,
file=sys.stderr,
ncols=None,
mininterval=0.1,
maxinterval=10.0,
miniters=None,
ascii=None,
disable=False,
unit="it",
unit_scale=False,
dynamic_ncols=False,
smoothing=0.3,
bar_format=None,
initial=0,
position=None,
gui=False,
**kwargs,
):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
[default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress update interval, in seconds [default: 10.0].
miniters : int, optional
Minimum progress update interval, in iterations.
If specified, will set `mininterval` to 0.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False].
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool, optional
If set, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False].
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
If unspecified, will use '{l_bar}{bar}{r_bar}', where l_bar is
'{desc}{percentage:3.0f}%|' and r_bar is
'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}]'
Possible vars: bar, n, n_fmt, total, total_fmt, percentage,
rate, rate_fmt, elapsed, remaining, l_bar, r_bar, desc.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"""\
`nested` is deprecated and automated. Use position instead for manual control.
""",
fp_write=getattr(file, "write", sys.stderr.write),
)
if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs))
)
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if (
(ncols is None) and (file in (sys.stderr, sys.stdout))
) or dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
ncols = dynamic_ncols(file)
else:
ncols = _environ_cols_wrapper()(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc + ": " if desc else ""
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
self.pos = self._get_free_pos(self) if position is None else position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
if self.pos:
self.moveto(self.pos)
self.sp(
self.format_meter(
self.n,
total,
0,
(dynamic_ncols(file) if dynamic_ncols else ncols),
self.desc,
ascii,
unit,
unit_scale,
None,
bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# Init the time counter
self.start_t = self.last_print_t = self._time()
|
https://github.com/tqdm/tqdm/issues/268
|
Traceback (most recent call last):
File "x.py", line 273, in train
bar.update(0)
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 808, in update
if self.avg_time is None \
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def __repr__(self):
return self.format_meter(
self.n,
self.total,
self._time() - self.start_t,
self.ncols,
self.desc,
self.ascii,
self.unit,
self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format,
)
|
def __repr__(self):
return self.format_meter(
self.n,
self.total,
time() - self.last_print_t,
self.ncols,
self.desc,
self.ascii,
self.unit,
self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format,
)
|
https://github.com/tqdm/tqdm/issues/268
|
Traceback (most recent call last):
File "x.py", line 273, in train
bar.update(0)
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 808, in update
if self.avg_time is None \
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning(
"""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= miniters:
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = (
delta_t / delta_it
if avg_time is None
else smoothing * delta_t / delta_it
+ (1 - smoothing) * avg_time
)
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(
format_meter(
n,
self.total,
elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols else ncols),
self.desc,
ascii,
unit,
unit_scale,
1 / avg_time if avg_time else None,
bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = (
smoothing * delta_it * mininterval / delta_t
+ (1 - smoothing) * miniters
)
else:
miniters = smoothing * delta_it + (1 - smoothing) * miniters
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
|
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
_time = self._time
format_meter = self.format_meter
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning(
"""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check the counter first (avoid calls to time())
if n - last_print_n >= miniters:
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = (
delta_t / delta_it
if avg_time is None
else smoothing * delta_t / delta_it
+ (1 - smoothing) * avg_time
)
if self.pos:
self.moveto(self.pos)
# Printing the bar's update
sp(
format_meter(
n,
self.total,
elapsed,
(dynamic_ncols(self.fp) if dynamic_ncols else ncols),
self.desc,
ascii,
unit,
unit_scale,
1 / avg_time if avg_time else None,
bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = (
smoothing * delta_it * mininterval / delta_t
+ (1 - smoothing) * miniters
)
else:
miniters = smoothing * delta_it + (1 - smoothing) * miniters
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
|
https://github.com/tqdm/tqdm/issues/268
|
Traceback (most recent call last):
File "x.py", line 273, in train
bar.update(0)
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 808, in update
if self.avg_time is None \
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
if self.n - self.last_print_n >= self.miniters:
# We check the counter first, to reduce the overhead of time()
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # should be n?
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = (
delta_t / delta_it
if self.avg_time is None
else self.smoothing * delta_t / delta_it
+ (1 - self.smoothing) * self.avg_time
)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(
self.format_meter(
self.n,
self.total,
elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols),
self.desc,
self.ascii,
self.unit,
self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval / delta_t
elif self.mininterval and delta_t:
self.miniters = (
self.smoothing * delta_it * self.mininterval / delta_t
+ (1 - self.smoothing) * self.miniters
)
else:
self.miniters = (
self.smoothing * delta_it + (1 - self.smoothing) * self.miniters
)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
|
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int
Increment to add to the internal counter of iterations
[default: 1].
"""
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
if self.n - self.last_print_n >= self.miniters:
# We check the counter first, to reduce the overhead of time()
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # should be n?
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = (
delta_t / delta_it
if self.avg_time is None
else self.smoothing * delta_t / delta_it
+ (1 - self.smoothing) * self.avg_time
)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""",
fp_write=getattr(self.fp, "write", sys.stderr.write),
)
if self.pos:
self.moveto(self.pos)
# Print bar's update
self.sp(
self.format_meter(
self.n,
self.total,
elapsed,
(self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols),
self.desc,
self.ascii,
self.unit,
self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format,
)
)
if self.pos:
self.moveto(-self.pos)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval / delta_t
elif self.mininterval and delta_t:
self.miniters = (
self.smoothing * delta_it * self.mininterval / delta_t
+ (1 - self.smoothing) * self.miniters
)
else:
self.miniters = (
self.smoothing * delta_it + (1 - self.smoothing) * self.miniters
)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
|
https://github.com/tqdm/tqdm/issues/268
|
Traceback (most recent call last):
File "x.py", line 273, in train
bar.update(0)
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 808, in update
if self.avg_time is None \
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
try:
if total:
pbar = IntProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IntProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = "info"
except NameError:
# #187 #451 #558
raise ImportError(
"IntProgress not found. Please update juputer and ipywidgets."
" See https://ipywidgets.readthedocs.io/en/stable"
"/user_install.html"
)
if desc:
pbar.description = desc
# Prepare status text
ptext = HTML()
# Only way to place text to the right of the bar is to use a container
container = HBox(children=[pbar, ptext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
if ncols[-1].isnumeric():
# if last value is digit, assume the value is digit
ncols += "px"
pbar.layout.flex = "2"
container.layout.width = ncols
container.layout.display = "inline-flex"
container.layout.flex_flow = "row wrap"
display(container)
def print_status(s="", close=False, bar_style=None, desc=None):
# Note: contrary to native tqdm, s='' does NOT clear bar
# goal is to keep all infos if error happens so user knows
# at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
# Get current iteration value from format_meter string
if total:
# n = None
if s:
npos = s.find(r"/|/") # cause we use bar_format=r'{n}|...'
# Check that n can be found in s (else n > total)
if npos >= 0:
n = int(s[:npos]) # get n from string
s = s[npos + 3 :] # remove from string
# Update bar with current n value
if n is not None:
pbar.value = n
# Print stats
if s: # never clear the bar (signal: s='')
s = s.replace("||", "") # remove inesthetical pipes
s = escape(s) # html escape special characters (like '?')
ptext.value = s
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overriden by
# success because the bar gets closed after the error...
if not (pbar.bar_style == "danger" and bar_style == "success"):
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != "danger": # hide only if no error
try:
container.close()
except AttributeError:
container.visible = False
# Update description
if desc:
pbar.description = desc
return print_status
|
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
if total:
pbar = IntProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IntProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = "info"
if desc:
pbar.description = desc
# Prepare status text
ptext = HTML()
# Only way to place text to the right of the bar is to use a container
container = HBox(children=[pbar, ptext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
if ncols[-1].isnumeric():
# if last value is digit, assume the value is digit
ncols += "px"
pbar.layout.flex = "2"
container.layout.width = ncols
container.layout.display = "inline-flex"
container.layout.flex_flow = "row wrap"
display(container)
def print_status(s="", close=False, bar_style=None, desc=None):
# Note: contrary to native tqdm, s='' does NOT clear bar
# goal is to keep all infos if error happens so user knows
# at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
# Get current iteration value from format_meter string
if total:
# n = None
if s:
npos = s.find(r"/|/") # cause we use bar_format=r'{n}|...'
# Check that n can be found in s (else n > total)
if npos >= 0:
n = int(s[:npos]) # get n from string
s = s[npos + 3 :] # remove from string
# Update bar with current n value
if n is not None:
pbar.value = n
# Print stats
if s: # never clear the bar (signal: s='')
s = s.replace("||", "") # remove inesthetical pipes
s = escape(s) # html escape special characters (like '?')
ptext.value = s
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overriden by
# success because the bar gets closed after the error...
if not (pbar.bar_style == "danger" and bar_style == "success"):
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != "danger": # hide only if no error
try:
container.close()
except AttributeError:
container.visible = False
# Update description
if desc:
pbar.description = desc
return print_status
|
https://github.com/tqdm/tqdm/issues/187
|
NameError Traceback (most recent call last)
<ipython-input-6-207cef0f8cd2> in <module>()
1 import tqdm
----> 2 for i in tqdm.tqdm_notebook(range(1000000)):
3 i * 2
~/.local/lib/python3.4/site-packages/tqdm/__init__.py in tqdm_notebook(*args, **kwargs)
17 """See tqdm._tqdm_notebook.tqdm_notebook for full documentation"""
18 from ._tqdm_notebook import tqdm_notebook as _tqdm_notebook
---> 19 return _tqdm_notebook(*args, **kwargs)
20
21
~/.local/lib/python3.4/site-packages/tqdm/_tqdm_notebook.py in __init__(self, *args, **kwargs)
182 # self.sp('', close=True)
183 # Replace with IPython progress bar display (with correct total)
--> 184 self.sp = self.status_printer(self.fp, self.total, self.desc)
185 self.desc = None # trick to place description before the bar
186
~/.local/lib/python3.4/site-packages/tqdm/_tqdm_notebook.py in status_printer(file, total, desc)
103 # Prepare IPython progress bar
104 if total:
--> 105 pbar = IntProgress(min=0, max=total)
106 else: # No total? Show info style bar with no progress tqdm status
107 pbar = IntProgress(min=0, max=1)
NameError: name 'IntProgress' is not defined
|
NameError
|
def validate(cls: Type["Model"], value: Any) -> "Model":
if isinstance(value, cls):
return value.copy() if cls.__config__.copy_on_model_validation else value
value = cls._enforce_dict_if_root(value)
if isinstance(value, dict):
return cls(**value)
elif cls.__config__.orm_mode:
return cls.from_orm(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
|
def validate(cls: Type["Model"], value: Any) -> "Model":
value = cls._enforce_dict_if_root(value)
if isinstance(value, dict):
return cls(**value)
elif isinstance(value, cls):
return value.copy() if cls.__config__.copy_on_model_validation else value
elif cls.__config__.orm_mode:
return cls.from_orm(value)
else:
try:
value_as_dict = dict(value)
except (TypeError, ValueError) as e:
raise DictError() from e
return cls(**value_as_dict)
|
https://github.com/samuelcolvin/pydantic/issues/2449
|
from typing import Generic
from typing import TypeVar
from typing import List
from pydantic.generics import GenericModel
from pydantic import BaseModel
T = TypeVar("T")
class BaseList(GenericModel, Generic[T]):
__root__: List[T]
class Test(BaseModel):
mylist: BaseList[int]
Test(mylist=[1,2,3,4])
# Test(mylist=BaseList[int](__root__=[1, 2, 3, 4]))
Test(mylist=BaseList[int](__root__=[1,2,3,4]))
# ---------------------------------------------------------------------------
# ValidationError Traceback (most recent call last)
# <ipython-input-10-c373af038c5b> in <module>
# ----> 1 Test(mylist=BaseList[int](__root__=[1,2,3,4]))
#
# /usr/local/lib/python3.8/dist-packages/pydantic/main.cpython-38-x86_64-linux-gnu.so in pydantic.main.BaseModel.__init__()
#
# ValidationError: 1 validation error for Test
# mylist -> __root__
# value is not a valid list (type=type_error.list)
|
ValidationError
|
def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]:
"""Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found."""
if isinstance(v, TypeVar):
yield v
elif (
hasattr(v, "__parameters__")
and not get_origin(v)
and lenient_issubclass(v, GenericModel)
):
yield from v.__parameters__
elif isinstance(v, (DictValues, list)):
for var in v:
yield from iter_contained_typevars(var)
else:
args = get_args(v)
for arg in args:
yield from iter_contained_typevars(arg)
|
def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]:
"""Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found."""
if isinstance(v, TypeVar):
yield v
elif (
hasattr(v, "__parameters__")
and not get_origin(v)
and lenient_issubclass(v, GenericModel)
):
yield from v.__parameters__
elif isinstance(v, Iterable):
for var in v:
yield from iter_contained_typevars(var)
else:
args = get_args(v)
for arg in args:
yield from iter_contained_typevars(arg)
|
https://github.com/samuelcolvin/pydantic/issues/2454
|
Traceback (most recent call last):
File "scratch_101.py", line 12, in <module>
GModelType = GModel[Fields, str]
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 110, in __class_getitem__
{param: None for param in iter_contained_typevars(typevars_map.values())}
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 110, in <dictcomp>
{param: None for param in iter_contained_typevars(typevars_map.values())}
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 216, in iter_contained_typevars
yield from iter_contained_typevars(var)
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 220, in iter_contained_typevars
yield from iter_contained_typevars(arg)
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 216, in iter_contained_typevars
yield from iter_contained_typevars(var)
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 216, in iter_contained_typevars
yield from iter_contained_typevars(var)
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 216, in iter_contained_typevars
yield from iter_contained_typevars(var)
[Previous line repeated 982 more times]
File "virtualenvs\foobar-HGIuaRl7-py3.9\lib\site-packages\pydantic\generics.py", line 214, in iter_contained_typevars
elif isinstance(v, Iterable):
File "C:\Programs\Python\Python39_x64\lib\typing.py", line 657, in __instancecheck__
return self.__subclasscheck__(type(obj))
File "C:\Programs\Python\Python39_x64\lib\typing.py", line 789, in __subclasscheck__
return issubclass(cls, self.__origin__)
File "C:\Programs\Python\Python39_x64\lib\abc.py", line 102, in __subclasscheck__
return _abc_subclasscheck(cls, subclass)
RecursionError: maximum recursion depth exceeded in comparison
|
RecursionError
|
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity)
# typing interface is horrible, we have to do some ugly checks
if lenient_issubclass(self.type_, JsonWrapper):
self.type_ = self.type_.inner_type
self.parse_json = True
elif lenient_issubclass(self.type_, Json):
self.type_ = Any
self.parse_json = True
elif isinstance(self.type_, TypeVar):
if self.type_.__bound__:
self.type_ = self.type_.__bound__
elif self.type_.__constraints__:
self.type_ = Union[self.type_.__constraints__]
else:
self.type_ = Any
elif is_new_type(self.type_):
self.type_ = new_type_supertype(self.type_)
if self.type_ is Any:
if self.required is Undefined:
self.required = False
self.allow_none = True
return
elif self.type_ is Pattern:
# python 3.7 only, Pattern is a typing object but without sub fields
return
elif is_literal_type(self.type_):
return
elif is_typeddict(self.type_):
return
origin = get_origin(self.type_)
if origin is None:
# field is not "typing" object eg. Union, Dict, List etc.
# allow None for virtual superclasses of NoneType, e.g. Hashable
if isinstance(self.type_, type) and isinstance(None, self.type_):
self.allow_none = True
return
if origin is Annotated:
self.type_ = get_args(self.type_)[0]
self._type_analysis()
return
if origin is Callable:
return
if origin is Union:
types_ = []
for type_ in get_args(self.type_):
if type_ is NoneType:
if self.required is Undefined:
self.required = False
self.allow_none = True
continue
types_.append(type_)
if len(types_) == 1:
# Optional[]
self.type_ = types_[0]
# this is the one case where the "outer type" isn't just the original type
self.outer_type_ = self.type_
# re-run to correctly interpret the new self.type_
self._type_analysis()
else:
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{display_as_type(t)}")
for t in types_
]
return
if issubclass(origin, Tuple): # type: ignore
# origin == Tuple without item type
args = get_args(self.type_)
if not args: # plain tuple
self.type_ = Any
self.shape = SHAPE_TUPLE_ELLIPSIS
elif len(args) == 2 and args[1] is Ellipsis: # e.g. Tuple[int, ...]
self.type_ = args[0]
self.shape = SHAPE_TUPLE_ELLIPSIS
self.sub_fields = [self._create_sub_type(args[0], f"{self.name}_0")]
elif args == ((),): # Tuple[()] means empty tuple
self.shape = SHAPE_TUPLE
self.type_ = Any
self.sub_fields = []
else:
self.shape = SHAPE_TUPLE
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(args)
]
return
if issubclass(origin, List):
# Create self validators
get_validators = getattr(self.type_, "__get_validators__", None)
if get_validators:
self.class_validators.update(
{
f"list_{i}": Validator(validator, pre=True)
for i, validator in enumerate(get_validators())
}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_LIST
elif issubclass(origin, Set):
# Create self validators
get_validators = getattr(self.type_, "__get_validators__", None)
if get_validators:
self.class_validators.update(
{
f"set_{i}": Validator(validator, pre=True)
for i, validator in enumerate(get_validators())
}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SET
elif issubclass(origin, FrozenSet):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_FROZENSET
elif issubclass(origin, Deque):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_DEQUE
elif issubclass(origin, Sequence):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SEQUENCE
elif issubclass(origin, DefaultDict):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DEFAULTDICT
elif issubclass(origin, Dict):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DICT
elif issubclass(origin, Mapping):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_MAPPING
# Equality check as almost everything inherits form Iterable, including str
# check for Iterable and CollectionsIterable, as it could receive one even when declared with the other
elif origin in {Iterable, CollectionsIterable}:
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_ITERABLE
self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")]
elif issubclass(origin, Type): # type: ignore
return
elif (
hasattr(origin, "__get_validators__")
or self.model_config.arbitrary_types_allowed
):
# Is a Pydantic-compatible generic that handles itself
# or we have arbitrary_types_allowed = True
self.shape = SHAPE_GENERIC
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{i}")
for i, t in enumerate(get_args(self.type_))
]
self.type_ = origin
return
else:
raise TypeError(f'Fields of type "{origin}" are not supported.')
# type_ has been refined eg. as the type of a List and sub_fields needs to be populated
self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
|
def _type_analysis(self) -> None: # noqa: C901 (ignore complexity)
# typing interface is horrible, we have to do some ugly checks
if lenient_issubclass(self.type_, JsonWrapper):
self.type_ = self.type_.inner_type
self.parse_json = True
elif lenient_issubclass(self.type_, Json):
self.type_ = Any
self.parse_json = True
elif isinstance(self.type_, TypeVar):
if self.type_.__bound__:
self.type_ = self.type_.__bound__
elif self.type_.__constraints__:
self.type_ = Union[self.type_.__constraints__]
else:
self.type_ = Any
elif is_new_type(self.type_):
self.type_ = new_type_supertype(self.type_)
if self.type_ is Any:
if self.required is Undefined:
self.required = False
self.allow_none = True
return
elif self.type_ is Pattern:
# python 3.7 only, Pattern is a typing object but without sub fields
return
elif is_literal_type(self.type_):
return
elif is_typeddict(self.type_):
return
origin = get_origin(self.type_)
if origin is None:
# field is not "typing" object eg. Union, Dict, List etc.
# allow None for virtual superclasses of NoneType, e.g. Hashable
if isinstance(self.type_, type) and isinstance(None, self.type_):
self.allow_none = True
return
if origin is Annotated:
self.type_ = get_args(self.type_)[0]
self._type_analysis()
return
if origin is Callable:
return
if origin is Union:
types_ = []
for type_ in get_args(self.type_):
if type_ is NoneType:
if self.required is Undefined:
self.required = False
self.allow_none = True
continue
types_.append(type_)
if len(types_) == 1:
# Optional[]
self.type_ = types_[0]
# this is the one case where the "outer type" isn't just the original type
self.outer_type_ = self.type_
# re-run to correctly interpret the new self.type_
self._type_analysis()
else:
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{display_as_type(t)}")
for t in types_
]
return
if issubclass(origin, Tuple): # type: ignore
# origin == Tuple without item type
args = get_args(self.type_)
if not args: # plain tuple
self.type_ = Any
self.shape = SHAPE_TUPLE_ELLIPSIS
elif len(args) == 2 and args[1] is Ellipsis: # e.g. Tuple[int, ...]
self.type_ = args[0]
self.shape = SHAPE_TUPLE_ELLIPSIS
elif args == ((),): # Tuple[()] means empty tuple
self.shape = SHAPE_TUPLE
self.type_ = Any
self.sub_fields = []
else:
self.shape = SHAPE_TUPLE
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{i}") for i, t in enumerate(args)
]
return
if issubclass(origin, List):
# Create self validators
get_validators = getattr(self.type_, "__get_validators__", None)
if get_validators:
self.class_validators.update(
{
f"list_{i}": Validator(validator, pre=True)
for i, validator in enumerate(get_validators())
}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_LIST
elif issubclass(origin, Set):
# Create self validators
get_validators = getattr(self.type_, "__get_validators__", None)
if get_validators:
self.class_validators.update(
{
f"set_{i}": Validator(validator, pre=True)
for i, validator in enumerate(get_validators())
}
)
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SET
elif issubclass(origin, FrozenSet):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_FROZENSET
elif issubclass(origin, Deque):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_DEQUE
elif issubclass(origin, Sequence):
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_SEQUENCE
elif issubclass(origin, DefaultDict):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DEFAULTDICT
elif issubclass(origin, Dict):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_DICT
elif issubclass(origin, Mapping):
self.key_field = self._create_sub_type(
get_args(self.type_)[0], "key_" + self.name, for_keys=True
)
self.type_ = get_args(self.type_)[1]
self.shape = SHAPE_MAPPING
# Equality check as almost everything inherits form Iterable, including str
# check for Iterable and CollectionsIterable, as it could receive one even when declared with the other
elif origin in {Iterable, CollectionsIterable}:
self.type_ = get_args(self.type_)[0]
self.shape = SHAPE_ITERABLE
self.sub_fields = [self._create_sub_type(self.type_, f"{self.name}_type")]
elif issubclass(origin, Type): # type: ignore
return
elif (
hasattr(origin, "__get_validators__")
or self.model_config.arbitrary_types_allowed
):
# Is a Pydantic-compatible generic that handles itself
# or we have arbitrary_types_allowed = True
self.shape = SHAPE_GENERIC
self.sub_fields = [
self._create_sub_type(t, f"{self.name}_{i}")
for i, t in enumerate(get_args(self.type_))
]
self.type_ = origin
return
else:
raise TypeError(f'Fields of type "{origin}" are not supported.')
# type_ has been refined eg. as the type of a List and sub_fields needs to be populated
self.sub_fields = [self._create_sub_type(self.type_, "_" + self.name)]
|
https://github.com/samuelcolvin/pydantic/issues/2416
|
TypeError Traceback (most recent call last)
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/validators.cpython-38-darwin.so in pydantic.validators.find_validators()
TypeError: issubclass() arg 1 must be a class
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-3-3f541325cc4d> in <module>
----> 1 class C(BaseModel):
2 t: Tuple[Tuple[int], ...] = ()
3
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/main.cpython-38-darwin.so in pydantic.main.ModelMetaclass.__new__()
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/fields.cpython-38-darwin.so in pydantic.fields.ModelField.infer()
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/fields.cpython-38-darwin.so in pydantic.fields.ModelField.__init__()
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/fields.cpython-38-darwin.so in pydantic.fields.ModelField.prepare()
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/fields.cpython-38-darwin.so in pydantic.fields.ModelField.populate_validators()
~/miniconda3/envs/napdev/lib/python3.8/site-packages/pydantic/validators.cpython-38-darwin.so in find_validators()
RuntimeError: error checking inheritance of typing.Tuple[int] (type: Tuple[int])
|
TypeError
|
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901
fields: Dict[str, ModelField] = {}
config = BaseConfig
validators: "ValidatorListDict" = {}
pre_root_validators, post_root_validators = [], []
private_attributes: Dict[str, ModelPrivateAttr] = {}
slots: SetStr = namespace.get("__slots__", ())
slots = {slots} if isinstance(slots, str) else set(slots)
class_vars: SetStr = set()
hash_func: Optional[Callable[[Any], int]] = None
for base in reversed(bases):
if (
_is_base_model_class_defined
and issubclass(base, BaseModel)
and base != BaseModel
):
fields.update(smart_deepcopy(base.__fields__))
config = inherit_config(base.__config__, config)
validators = inherit_validators(base.__validators__, validators)
pre_root_validators += base.__pre_root_validators__
post_root_validators += base.__post_root_validators__
private_attributes.update(base.__private_attributes__)
class_vars.update(base.__class_vars__)
hash_func = base.__hash__
config_kwargs = {
key: kwargs.pop(key) for key in kwargs.keys() & BaseConfig.__dict__.keys()
}
config_from_namespace = namespace.get("Config")
if config_kwargs and config_from_namespace:
raise TypeError(
"Specifying config in two places is ambiguous, use either Config attribute or class kwargs"
)
config = inherit_config(config_from_namespace, config, **config_kwargs)
validators = inherit_validators(extract_validators(namespace), validators)
vg = ValidatorGroup(validators)
for f in fields.values():
f.set_config(config)
extra_validators = vg.get_validators(f.name)
if extra_validators:
f.class_validators.update(extra_validators)
# re-run prepare to add extra validators
f.populate_validators()
prepare_config(config, name)
untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES
def is_untouched(v: Any) -> bool:
return (
isinstance(v, untouched_types)
or v.__class__.__name__ == "cython_function_or_method"
)
if (namespace.get("__module__"), namespace.get("__qualname__")) != (
"pydantic.main",
"BaseModel",
):
annotations = resolve_annotations(
namespace.get("__annotations__", {}), namespace.get("__module__", None)
)
# annotation only fields need to come first in fields
for ann_name, ann_type in annotations.items():
if is_classvar(ann_type):
class_vars.add(ann_name)
elif is_valid_field(ann_name):
validate_field_name(bases, ann_name)
value = namespace.get(ann_name, Undefined)
allowed_types = (
get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,)
)
if (
is_untouched(value)
and ann_type != PyObject
and not any(
lenient_issubclass(get_origin(allowed_type), Type)
for allowed_type in allowed_types
)
):
continue
fields[ann_name] = ModelField.infer(
name=ann_name,
value=value,
annotation=ann_type,
class_validators=vg.get_validators(ann_name),
config=config,
)
elif ann_name not in namespace and config.underscore_attrs_are_private:
private_attributes[ann_name] = PrivateAttr()
untouched_types = UNTOUCHED_TYPES + config.keep_untouched
for var_name, value in namespace.items():
can_be_changed = var_name not in class_vars and not is_untouched(value)
if isinstance(value, ModelPrivateAttr):
if not is_valid_private_name(var_name):
raise NameError(
f'Private attributes "{var_name}" must not be a valid field name; '
f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"'
)
private_attributes[var_name] = value
elif (
config.underscore_attrs_are_private
and is_valid_private_name(var_name)
and can_be_changed
):
private_attributes[var_name] = PrivateAttr(default=value)
elif (
is_valid_field(var_name)
and var_name not in annotations
and can_be_changed
):
validate_field_name(bases, var_name)
inferred = ModelField.infer(
name=var_name,
value=value,
annotation=annotations.get(var_name, Undefined),
class_validators=vg.get_validators(var_name),
config=config,
)
if var_name in fields and inferred.type_ != fields[var_name].type_:
raise TypeError(
f"The type of {name}.{var_name} differs from the new default value; "
f"if you wish to change the type of this field, please use a type annotation"
)
fields[var_name] = inferred
_custom_root_type = ROOT_KEY in fields
if _custom_root_type:
validate_custom_root_type(fields)
vg.check_for_unused()
if config.json_encoders:
json_encoder = partial(custom_pydantic_encoder, config.json_encoders)
else:
json_encoder = pydantic_encoder
pre_rv_new, post_rv_new = extract_root_validators(namespace)
if hash_func is None:
hash_func = generate_hash_function(config.frozen)
exclude_from_namespace = fields | private_attributes.keys() | {"__slots__"}
new_namespace = {
"__config__": config,
"__fields__": fields,
"__validators__": vg.validators,
"__pre_root_validators__": unique_list(pre_root_validators + pre_rv_new),
"__post_root_validators__": unique_list(post_root_validators + post_rv_new),
"__schema_cache__": {},
"__json_encoder__": staticmethod(json_encoder),
"__custom_root_type__": _custom_root_type,
"__private_attributes__": private_attributes,
"__slots__": slots | private_attributes.keys(),
"__hash__": hash_func,
"__class_vars__": class_vars,
**{n: v for n, v in namespace.items() if n not in exclude_from_namespace},
}
cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)
# set __signature__ attr only for model class, but not for its instances
cls.__signature__ = ClassAttribute(
"__signature__", generate_model_signature(cls.__init__, fields, config)
)
return cls
|
def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901
fields: Dict[str, ModelField] = {}
config = BaseConfig
validators: "ValidatorListDict" = {}
pre_root_validators, post_root_validators = [], []
private_attributes: Dict[str, ModelPrivateAttr] = {}
slots: SetStr = namespace.get("__slots__", ())
slots = {slots} if isinstance(slots, str) else set(slots)
class_vars: SetStr = set()
for base in reversed(bases):
if (
_is_base_model_class_defined
and issubclass(base, BaseModel)
and base != BaseModel
):
fields.update(smart_deepcopy(base.__fields__))
config = inherit_config(base.__config__, config)
validators = inherit_validators(base.__validators__, validators)
pre_root_validators += base.__pre_root_validators__
post_root_validators += base.__post_root_validators__
private_attributes.update(base.__private_attributes__)
class_vars.update(base.__class_vars__)
config_kwargs = {
key: kwargs.pop(key) for key in kwargs.keys() & BaseConfig.__dict__.keys()
}
config_from_namespace = namespace.get("Config")
if config_kwargs and config_from_namespace:
raise TypeError(
"Specifying config in two places is ambiguous, use either Config attribute or class kwargs"
)
config = inherit_config(config_from_namespace, config, **config_kwargs)
validators = inherit_validators(extract_validators(namespace), validators)
vg = ValidatorGroup(validators)
for f in fields.values():
f.set_config(config)
extra_validators = vg.get_validators(f.name)
if extra_validators:
f.class_validators.update(extra_validators)
# re-run prepare to add extra validators
f.populate_validators()
prepare_config(config, name)
untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES
def is_untouched(v: Any) -> bool:
return (
isinstance(v, untouched_types)
or v.__class__.__name__ == "cython_function_or_method"
)
if (namespace.get("__module__"), namespace.get("__qualname__")) != (
"pydantic.main",
"BaseModel",
):
annotations = resolve_annotations(
namespace.get("__annotations__", {}), namespace.get("__module__", None)
)
# annotation only fields need to come first in fields
for ann_name, ann_type in annotations.items():
if is_classvar(ann_type):
class_vars.add(ann_name)
elif is_valid_field(ann_name):
validate_field_name(bases, ann_name)
value = namespace.get(ann_name, Undefined)
allowed_types = (
get_args(ann_type) if get_origin(ann_type) is Union else (ann_type,)
)
if (
is_untouched(value)
and ann_type != PyObject
and not any(
lenient_issubclass(get_origin(allowed_type), Type)
for allowed_type in allowed_types
)
):
continue
fields[ann_name] = ModelField.infer(
name=ann_name,
value=value,
annotation=ann_type,
class_validators=vg.get_validators(ann_name),
config=config,
)
elif ann_name not in namespace and config.underscore_attrs_are_private:
private_attributes[ann_name] = PrivateAttr()
untouched_types = UNTOUCHED_TYPES + config.keep_untouched
for var_name, value in namespace.items():
can_be_changed = var_name not in class_vars and not is_untouched(value)
if isinstance(value, ModelPrivateAttr):
if not is_valid_private_name(var_name):
raise NameError(
f'Private attributes "{var_name}" must not be a valid field name; '
f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"'
)
private_attributes[var_name] = value
elif (
config.underscore_attrs_are_private
and is_valid_private_name(var_name)
and can_be_changed
):
private_attributes[var_name] = PrivateAttr(default=value)
elif (
is_valid_field(var_name)
and var_name not in annotations
and can_be_changed
):
validate_field_name(bases, var_name)
inferred = ModelField.infer(
name=var_name,
value=value,
annotation=annotations.get(var_name, Undefined),
class_validators=vg.get_validators(var_name),
config=config,
)
if var_name in fields and inferred.type_ != fields[var_name].type_:
raise TypeError(
f"The type of {name}.{var_name} differs from the new default value; "
f"if you wish to change the type of this field, please use a type annotation"
)
fields[var_name] = inferred
_custom_root_type = ROOT_KEY in fields
if _custom_root_type:
validate_custom_root_type(fields)
vg.check_for_unused()
if config.json_encoders:
json_encoder = partial(custom_pydantic_encoder, config.json_encoders)
else:
json_encoder = pydantic_encoder
pre_rv_new, post_rv_new = extract_root_validators(namespace)
exclude_from_namespace = fields | private_attributes.keys() | {"__slots__"}
new_namespace = {
"__config__": config,
"__fields__": fields,
"__validators__": vg.validators,
"__pre_root_validators__": unique_list(pre_root_validators + pre_rv_new),
"__post_root_validators__": unique_list(post_root_validators + post_rv_new),
"__schema_cache__": {},
"__json_encoder__": staticmethod(json_encoder),
"__custom_root_type__": _custom_root_type,
"__private_attributes__": private_attributes,
"__slots__": slots | private_attributes.keys(),
"__hash__": generate_hash_function(config.frozen),
"__class_vars__": class_vars,
**{n: v for n, v in namespace.items() if n not in exclude_from_namespace},
}
cls = super().__new__(mcs, name, bases, new_namespace, **kwargs)
# set __signature__ attr only for model class, but not for its instances
cls.__signature__ = ClassAttribute(
"__signature__", generate_model_signature(cls.__init__, fields, config)
)
return cls
|
https://github.com/samuelcolvin/pydantic/issues/2422
|
Traceback (most recent call last):
File "bug.py", line 4, in <module>
class Foo1(BaseModel):
File "pydantic/main.py", line 352, in pydantic.main.ModelMetaclass.__new__
File "/usr/lib/python3.8/abc.py", line 85, in __new__
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
ValueError: '__hash__' in __slots__ conflicts with class variable
|
ValueError
|
def list_length_validator(cls, v: "Optional[List[T]]") -> "Optional[List[T]]":
if v is None:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
|
def list_length_validator(
cls, v: "Optional[List[T]]", field: "ModelField"
) -> "Optional[List[T]]":
if v is None and not field.required:
return None
v = list_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.ListMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.ListMaxLengthError(limit_value=cls.max_items)
return v
|
https://github.com/samuelcolvin/pydantic/issues/2320
|
Info model set: network_segments=None
Info model conset: network_segments=None
Info settings set: network_segments=None
Traceback (most recent call last):
File "/home/mihai/bug.py", line 25, in <module>
print(f"Info settings conset: {InfoSettingsConset()}")
File "pydantic/env_settings.py", line 34, in pydantic.env_settings.BaseSettings.__init__
File "pydantic/main.py", line 362, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for InfoSettingsConset
network_segments
value is not a valid set (type=type_error.set)
|
pydantic.error_wrappers.ValidationError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.