instance_id
stringlengths
17
36
text
stringlengths
14k
547k
repo
stringclasses
4 values
base_commit
stringlengths
40
40
problem_statement
stringclasses
10 values
hints_text
stringclasses
8 values
created_at
stringlengths
20
20
patch
stringclasses
10 values
test_patch
stringclasses
10 values
version
stringclasses
2 values
FAIL_TO_PASS
stringclasses
1 value
PASS_TO_PASS
stringclasses
1 value
environment_setup_commit
stringclasses
1 value
mixpanel__mixpanel-python-64
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> flush function for Buffered Consumer not working Hi, in class BufferedConsumer the flush function in line 338 should change to def flush (self,api_key=None) and then in line 444-445 should change to: for endpoint in self._buffers.keys(): self._flush_endpoint(endpoint,api_key=api_key) </issue> <code> [start of README.rst] 1 mixpanel-python |travis-badge| 2 ============================== 3 4 This is the official Mixpanel Python library. This library allows for 5 server-side integration of Mixpanel. 6 7 8 Installation 9 ------------ 10 11 The library can be installed using pip:: 12 13 pip install mixpanel 14 15 16 Getting Started 17 --------------- 18 19 Typical usage usually looks like this:: 20 21 from mixpanel import Mixpanel 22 23 mp = Mixpanel(YOUR_TOKEN) 24 25 # tracks an event with certain properties 26 mp.track(DISTINCT_ID, 'button clicked', {'color' : 'blue', 'size': 'large'}) 27 28 # sends an update to a user profile 29 mp.people_set(DISTINCT_ID, {'$first_name' : 'Ilya', 'favorite pizza': 'margherita'}) 30 31 You can use an instance of the Mixpanel class for sending all of your events 32 and people updates. 33 34 35 Additional Information 36 ---------------------- 37 38 * `Help Docs`_ 39 * `Full Documentation`_ 40 * mixpanel-python-async_; a third party tool for sending data asynchronously 41 from the tracking python process. 42 43 44 .. |travis-badge| image:: https://travis-ci.org/mixpanel/mixpanel-python.svg?branch=master 45 :target: https://travis-ci.org/mixpanel/mixpanel-python 46 .. _Help Docs: https://www.mixpanel.com/help/reference/python 47 .. _Full Documentation: http://mixpanel.github.io/mixpanel-python/ 48 .. _mixpanel-python-async: https://github.com/jessepollak/mixpanel-python-async 49 [end of README.rst] [start of mixpanel/__init__.py] 1 # -*- coding: utf-8 -*- 2 """This is the official Mixpanel client library for Python. 
3 4 Mixpanel client libraries allow for tracking events and setting properties on 5 People Analytics profiles from your server-side projects. This is the API 6 documentation; you may also be interested in the higher-level `usage 7 documentation`_. If your users are interacting with your application via the 8 web, you may also be interested in our `JavaScript library`_. 9 10 .. _`Javascript library`: https://mixpanel.com/help/reference/javascript 11 .. _`usage documentation`: https://mixpanel.com/help/reference/python 12 13 :class:`~.Mixpanel` is the primary class for tracking events and sending People 14 Analytics updates. :class:`~.Consumer` and :class:`~.BufferedConsumer` allow 15 callers to customize the IO characteristics of their tracking. 16 """ 17 from __future__ import absolute_import, unicode_literals 18 import base64 19 import datetime 20 import json 21 import time 22 23 import six 24 from six.moves import urllib 25 26 __version__ = '4.3.1' 27 VERSION = __version__ # TODO: remove when bumping major version. 28 29 30 class DatetimeSerializer(json.JSONEncoder): 31 def default(self, obj): 32 if isinstance(obj, datetime.datetime): 33 fmt = '%Y-%m-%dT%H:%M:%S' 34 return obj.strftime(fmt) 35 36 return json.JSONEncoder.default(self, obj) 37 38 39 def json_dumps(data, cls=None): 40 # Separators are specified to eliminate whitespace. 41 return json.dumps(data, separators=(',', ':'), cls=cls) 42 43 44 class Mixpanel(object): 45 """Instances of Mixpanel are used for all events and profile updates. 46 47 :param str token: your project's Mixpanel token 48 :param consumer: can be used to alter the behavior of tracking (default 49 :class:`~.Consumer`) 50 :param json.JSONEncoder serializer: a JSONEncoder subclass used to handle 51 JSON serialization (default :class:`~.DatetimeSerializer`) 52 53 See `Built-in consumers`_ for details about the consumer interface. 54 55 .. versionadded:: 4.2.0 56 The *serializer* parameter. 
57 """ 58 59 def __init__(self, token, consumer=None, serializer=DatetimeSerializer): 60 self._token = token 61 self._consumer = consumer or Consumer() 62 self._serializer = serializer 63 64 def _now(self): 65 return time.time() 66 67 def track(self, distinct_id, event_name, properties=None, meta=None): 68 """Record an event. 69 70 :param str distinct_id: identifies the user triggering the event 71 :param str event_name: a name describing the event 72 :param dict properties: additional data to record; keys should be 73 strings, and values should be strings, numbers, or booleans 74 :param dict meta: overrides Mixpanel special properties 75 76 ``properties`` should describe the circumstances of the event, or 77 aspects of the source or user associated with it. ``meta`` is used 78 (rarely) to override special values sent in the event object. 79 """ 80 all_properties = { 81 'token': self._token, 82 'distinct_id': distinct_id, 83 'time': int(self._now()), 84 'mp_lib': 'python', 85 '$lib_version': __version__, 86 } 87 if properties: 88 all_properties.update(properties) 89 event = { 90 'event': event_name, 91 'properties': all_properties, 92 } 93 if meta: 94 event.update(meta) 95 self._consumer.send('events', json_dumps(event, cls=self._serializer)) 96 97 def import_data(self, api_key, distinct_id, event_name, timestamp, 98 properties=None, meta=None): 99 """Record an event that occured more than 5 days in the past. 
100 101 :param str api_key: your Mixpanel project's API key 102 :param str distinct_id: identifies the user triggering the event 103 :param str event_name: a name describing the event 104 :param int timestamp: UTC seconds since epoch 105 :param dict properties: additional data to record; keys should be 106 strings, and values should be strings, numbers, or booleans 107 :param dict meta: overrides Mixpanel special properties 108 109 To avoid accidentally recording invalid events, the Mixpanel API's 110 ``track`` endpoint disallows events that occurred too long ago. This 111 method can be used to import such events. See our online documentation 112 for `more details 113 <https://mixpanel.com/docs/api-documentation/importing-events-older-than-31-days>`__. 114 """ 115 all_properties = { 116 'token': self._token, 117 'distinct_id': distinct_id, 118 'time': int(timestamp), 119 'mp_lib': 'python', 120 '$lib_version': __version__, 121 } 122 if properties: 123 all_properties.update(properties) 124 event = { 125 'event': event_name, 126 'properties': all_properties, 127 } 128 if meta: 129 event.update(meta) 130 self._consumer.send('imports', json_dumps(event, cls=self._serializer), api_key) 131 132 def alias(self, alias_id, original, meta=None): 133 """Apply a custom alias to a people record. 134 135 :param str alias_id: the new distinct_id 136 :param str original: the previous distinct_id 137 :param dict meta: overrides Mixpanel special properties 138 139 Immediately creates a one-way mapping between two ``distinct_ids``. 140 Events triggered by the new id will be associated with the existing 141 user's profile and behavior. See our online documentation for `more 142 details 143 <https://mixpanel.com/docs/integration-libraries/using-mixpanel-alias>`__. 144 145 .. note:: 146 Calling this method *always* results in a synchronous HTTP request 147 to Mixpanel servers, regardless of any custom consumer. 
148 """ 149 sync_consumer = Consumer() 150 event = { 151 'event': '$create_alias', 152 'properties': { 153 'distinct_id': original, 154 'alias': alias_id, 155 'token': self._token, 156 }, 157 } 158 if meta: 159 event.update(meta) 160 sync_consumer.send('events', json_dumps(event, cls=self._serializer)) 161 162 def people_set(self, distinct_id, properties, meta=None): 163 """Set properties of a people record. 164 165 :param str distinct_id: the profile to update 166 :param dict properties: properties to set 167 :param dict meta: overrides Mixpanel `special properties`_ 168 169 .. _`special properties`: https://mixpanel.com/help/reference/http#people-analytics-updates 170 171 If the profile does not exist, creates a new profile with these properties. 172 """ 173 return self.people_update({ 174 '$distinct_id': distinct_id, 175 '$set': properties, 176 }, meta=meta or {}) 177 178 def people_set_once(self, distinct_id, properties, meta=None): 179 """Set properties of a people record if they are not already set. 180 181 :param str distinct_id: the profile to update 182 :param dict properties: properties to set 183 184 Any properties that already exist on the profile will not be 185 overwritten. If the profile does not exist, creates a new profile with 186 these properties. 187 """ 188 return self.people_update({ 189 '$distinct_id': distinct_id, 190 '$set_once': properties, 191 }, meta=meta or {}) 192 193 def people_increment(self, distinct_id, properties, meta=None): 194 """Increment/decrement numerical properties of a people record. 195 196 :param str distinct_id: the profile to update 197 :param dict properties: properties to increment/decrement; values 198 should be numeric 199 200 Adds numerical values to properties of a people record. Nonexistent 201 properties on the record default to zero. Negative values in 202 ``properties`` will decrement the given property. 
203 """ 204 return self.people_update({ 205 '$distinct_id': distinct_id, 206 '$add': properties, 207 }, meta=meta or {}) 208 209 def people_append(self, distinct_id, properties, meta=None): 210 """Append to the list associated with a property. 211 212 :param str distinct_id: the profile to update 213 :param dict properties: properties to append 214 215 Adds items to list-style properties of a people record. Appending to 216 nonexistent properties results in a list with a single element. For 217 example:: 218 219 mp.people_append('123', {'Items': 'Super Arm'}) 220 """ 221 return self.people_update({ 222 '$distinct_id': distinct_id, 223 '$append': properties, 224 }, meta=meta or {}) 225 226 def people_union(self, distinct_id, properties, meta=None): 227 """Merge the values of a list associated with a property. 228 229 :param str distinct_id: the profile to update 230 :param dict properties: properties to merge 231 232 Merges list values in ``properties`` with existing list-style 233 properties of a people record. Duplicate values are ignored. For 234 example:: 235 236 mp.people_union('123', {'Items': ['Super Arm', 'Fire Storm']}) 237 """ 238 return self.people_update({ 239 '$distinct_id': distinct_id, 240 '$union': properties, 241 }, meta=meta or {}) 242 243 def people_unset(self, distinct_id, properties, meta=None): 244 """Permanently remove properties from a people record. 245 246 :param str distinct_id: the profile to update 247 :param list properties: property names to remove 248 """ 249 return self.people_update({ 250 '$distinct_id': distinct_id, 251 '$unset': properties, 252 }, meta=meta) 253 254 def people_delete(self, distinct_id, meta=None): 255 """Permanently delete a people record. 
256 257 :param str distinct_id: the profile to delete 258 """ 259 return self.people_update({ 260 '$distinct_id': distinct_id, 261 '$delete': "", 262 }, meta=meta or None) 263 264 def people_track_charge(self, distinct_id, amount, 265 properties=None, meta=None): 266 """Track a charge on a people record. 267 268 :param str distinct_id: the profile with which to associate the charge 269 :param numeric amount: number of dollars charged 270 :param dict properties: extra properties related to the transaction 271 272 Record that you have charged the current user a certain amount of 273 money. Charges recorded with this way will appear in the Mixpanel 274 revenue report. 275 """ 276 if properties is None: 277 properties = {} 278 properties.update({'$amount': amount}) 279 return self.people_append( 280 distinct_id, {'$transactions': properties or {}}, meta=meta or {} 281 ) 282 283 def people_clear_charges(self, distinct_id, meta=None): 284 """Permanently clear all charges on a people record. 285 286 :param str distinct_id: the profile whose charges will be cleared 287 """ 288 return self.people_unset( 289 distinct_id, ["$transactions"], meta=meta or {}, 290 ) 291 292 def people_update(self, message, meta=None): 293 """Send a generic update to Mixpanel people analytics. 294 295 :param dict message: the message to send 296 297 Callers are responsible for formatting the update message as documented 298 in the `Mixpanel HTTP specification`_. This method may be useful if you 299 want to use very new or experimental features of people analytics, but 300 please use the other ``people_*`` methods where possible. 301 302 .. 
_`Mixpanel HTTP specification`: https://mixpanel.com/help/reference/http 303 """ 304 record = { 305 '$token': self._token, 306 '$time': int(self._now() * 1000), 307 } 308 record.update(message) 309 if meta: 310 record.update(meta) 311 self._consumer.send('people', json_dumps(record, cls=self._serializer)) 312 313 314 class MixpanelException(Exception): 315 """Raised by consumers when unable to send messages. 316 317 This could be caused by a network outage or interruption, or by an invalid 318 endpoint passed to :meth:`.Consumer.send`. 319 """ 320 pass 321 322 323 class Consumer(object): 324 """ 325 A consumer that sends an HTTP request directly to the Mixpanel service, one 326 per call to :meth:`~.send`. 327 328 :param str events_url: override the default events API endpoint 329 :param str people_url: override the default people API endpoint 330 :param str import_url: override the default import API endpoint 331 :param int request_timeout: connection timeout in seconds 332 """ 333 334 def __init__(self, events_url=None, people_url=None, import_url=None, request_timeout=None): 335 self._endpoints = { 336 'events': events_url or 'https://api.mixpanel.com/track', 337 'people': people_url or 'https://api.mixpanel.com/engage', 338 'imports': import_url or 'https://api.mixpanel.com/import', 339 } 340 self._request_timeout = request_timeout 341 342 def send(self, endpoint, json_message, api_key=None): 343 """Immediately record an event or a profile update. 344 345 :param endpoint: the Mixpanel API endpoint appropriate for the message 346 :type endpoint: "events" | "people" | "imports" 347 :param str json_message: a JSON message formatted for the endpoint 348 :raises MixpanelException: if the endpoint doesn't exist, the server is 349 unreachable, or the message cannot be processed 350 """ 351 if endpoint in self._endpoints: 352 self._write_request(self._endpoints[endpoint], json_message, api_key) 353 else: 354 raise MixpanelException('No such endpoint "{0}". 
Valid endpoints are one of {1}'.format(endpoint, self._endpoints.keys())) 355 356 def _write_request(self, request_url, json_message, api_key=None): 357 data = { 358 'data': base64.b64encode(json_message.encode('utf8')), 359 'verbose': 1, 360 'ip': 0, 361 } 362 if api_key: 363 data.update({'api_key': api_key}) 364 encoded_data = urllib.parse.urlencode(data).encode('utf8') 365 try: 366 request = urllib.request.Request(request_url, encoded_data) 367 368 # Note: We don't send timeout=None here, because the timeout in urllib2 defaults to 369 # an internal socket timeout, not None. 370 if self._request_timeout is not None: 371 response = urllib.request.urlopen(request, timeout=self._request_timeout).read() 372 else: 373 response = urllib.request.urlopen(request).read() 374 except urllib.error.URLError as e: 375 six.raise_from(MixpanelException(e), e) 376 377 try: 378 response = json.loads(response.decode('utf8')) 379 except ValueError: 380 raise MixpanelException('Cannot interpret Mixpanel server response: {0}'.format(response)) 381 382 if response['status'] != 1: 383 raise MixpanelException('Mixpanel error: {0}'.format(response['error'])) 384 385 return True 386 387 388 class BufferedConsumer(object): 389 """ 390 A consumer that maintains per-endpoint buffers of messages and then sends 391 them in batches. This can save bandwidth and reduce the total amount of 392 time required to post your events to Mixpanel. 393 394 .. note:: 395 Because :class:`~.BufferedConsumer` holds events, you need to call 396 :meth:`~.flush` when you're sure you're done sending themβ€”for example, 397 just before your program exits. Calls to :meth:`~.flush` will send all 398 remaining unsent events being held by the instance. 
399 400 :param int max_size: number of :meth:`~.send` calls for a given endpoint to 401 buffer before flushing automatically 402 :param str events_url: override the default events API endpoint 403 :param str people_url: override the default people API endpoint 404 :param str import_url: override the default import API endpoint 405 :param int request_timeout: connection timeout in seconds 406 """ 407 def __init__(self, max_size=50, events_url=None, people_url=None, import_url=None, request_timeout=None): 408 self._consumer = Consumer(events_url, people_url, import_url, request_timeout) 409 self._buffers = { 410 'events': [], 411 'people': [], 412 'imports': [], 413 } 414 self._max_size = min(50, max_size) 415 416 def send(self, endpoint, json_message, api_key=None): 417 """Record an event or profile update. 418 419 Internally, adds the message to a buffer, and then flushes the buffer 420 if it has reached the configured maximum size. Note that exceptions 421 raised may have been caused by a message buffered by an earlier call to 422 :meth:`~.send`. 423 424 :param endpoint: the Mixpanel API endpoint appropriate for the message 425 :type endpoint: "events" | "people" | "imports" 426 :param str json_message: a JSON message formatted for the endpoint 427 :raises MixpanelException: if the endpoint doesn't exist, the server is 428 unreachable, or any buffered message cannot be processed 429 """ 430 if endpoint not in self._buffers: 431 raise MixpanelException('No such endpoint "{0}". Valid endpoints are one of {1}'.format(endpoint, self._buffers.keys())) 432 433 buf = self._buffers[endpoint] 434 buf.append(json_message) 435 if len(buf) >= self._max_size: 436 self._flush_endpoint(endpoint, api_key) 437 438 def flush(self): 439 """Immediately send all buffered messages to Mixpanel. 
440 441 :raises MixpanelException: if the server is unreachable or any buffered 442 message cannot be processed 443 """ 444 for endpoint in self._buffers.keys(): 445 self._flush_endpoint(endpoint) 446 447 def _flush_endpoint(self, endpoint, api_key=None): 448 buf = self._buffers[endpoint] 449 while buf: 450 batch = buf[:self._max_size] 451 batch_json = '[{0}]'.format(','.join(batch)) 452 try: 453 self._consumer.send(endpoint, batch_json, api_key) 454 except MixpanelException as orig_e: 455 mp_e = MixpanelException(orig_e) 456 mp_e.message = batch_json 457 mp_e.endpoint = endpoint 458 six.raise_from(mp_e, orig_e) 459 buf = buf[self._max_size:] 460 self._buffers[endpoint] = buf 461 [end of mixpanel/__init__.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the format shown above. Respond below:
mixpanel/mixpanel-python
40c98e0b285898384cc4aa6cc803d8d0f46f6218
flush function for Buffered Consumer not working Hi, in class BufferedConsumer the flush function in line 338 should change to def flush (self,api_key=None) and then in line 444-445 should change to: for endpoint in self._buffers.keys(): self._flush_endpoint(endpoint,api_key=api_key)
+1 I have the same issue. The exception is: "Mixpanel error: token, missing or empty" because of this bug. +1 I also just ran into this. Is it worth submitting a PR for this? I see 3 unmerged PRs that are a few years old.
2016-12-22T00:07:05Z
<patch> diff --git a/mixpanel/__init__.py b/mixpanel/__init__.py --- a/mixpanel/__init__.py +++ b/mixpanel/__init__.py @@ -345,6 +345,7 @@ def send(self, endpoint, json_message, api_key=None): :param endpoint: the Mixpanel API endpoint appropriate for the message :type endpoint: "events" | "people" | "imports" :param str json_message: a JSON message formatted for the endpoint + :param str api_key: your Mixpanel project's API key :raises MixpanelException: if the endpoint doesn't exist, the server is unreachable, or the message cannot be processed """ @@ -412,6 +413,7 @@ def __init__(self, max_size=50, events_url=None, people_url=None, import_url=Non 'imports': [], } self._max_size = min(50, max_size) + self._api_key = None def send(self, endpoint, json_message, api_key=None): """Record an event or profile update. @@ -424,16 +426,22 @@ def send(self, endpoint, json_message, api_key=None): :param endpoint: the Mixpanel API endpoint appropriate for the message :type endpoint: "events" | "people" | "imports" :param str json_message: a JSON message formatted for the endpoint + :param str api_key: your Mixpanel project's API key :raises MixpanelException: if the endpoint doesn't exist, the server is unreachable, or any buffered message cannot be processed + + .. versionadded:: 4.3.2 + The *api_key* parameter. """ if endpoint not in self._buffers: raise MixpanelException('No such endpoint "{0}". Valid endpoints are one of {1}'.format(endpoint, self._buffers.keys())) buf = self._buffers[endpoint] buf.append(json_message) + if api_key is not None: + self._api_key = api_key if len(buf) >= self._max_size: - self._flush_endpoint(endpoint, api_key) + self._flush_endpoint(endpoint) def flush(self): """Immediately send all buffered messages to Mixpanel. 
@@ -444,13 +452,13 @@ def flush(self): for endpoint in self._buffers.keys(): self._flush_endpoint(endpoint) - def _flush_endpoint(self, endpoint, api_key=None): + def _flush_endpoint(self, endpoint): buf = self._buffers[endpoint] while buf: batch = buf[:self._max_size] batch_json = '[{0}]'.format(','.join(batch)) try: - self._consumer.send(endpoint, batch_json, api_key) + self._consumer.send(endpoint, batch_json, self._api_key) except MixpanelException as orig_e: mp_e = MixpanelException(orig_e) mp_e.message = batch_json </patch>
diff --git a/test_mixpanel.py b/test_mixpanel.py --- a/test_mixpanel.py +++ b/test_mixpanel.py @@ -353,40 +353,32 @@ class TestBufferedConsumer: def setup_class(cls): cls.MAX_LENGTH = 10 cls.consumer = mixpanel.BufferedConsumer(cls.MAX_LENGTH) - cls.mock = Mock() - cls.mock.read.return_value = six.b('{"status":1, "error": null}') + cls.consumer._consumer = LogConsumer() + cls.log = cls.consumer._consumer.log - def test_buffer_hold_and_flush(self): - with patch('six.moves.urllib.request.urlopen', return_value=self.mock) as urlopen: - self.consumer.send('events', '"Event"') - assert not self.mock.called - self.consumer.flush() + def setup_method(self): + del self.log[:] - assert urlopen.call_count == 1 - - (call_args, kwargs) = urlopen.call_args - (request,) = call_args - timeout = kwargs.get('timeout', None) - - assert request.get_full_url() == 'https://api.mixpanel.com/track' - assert qs(request.data) == qs('ip=0&data=WyJFdmVudCJd&verbose=1') - assert timeout is None + def test_buffer_hold_and_flush(self): + self.consumer.send('events', '"Event"') + assert len(self.log) == 0 + self.consumer.flush() + assert self.log == [('events', ['Event'])] def test_buffer_fills_up(self): - with patch('six.moves.urllib.request.urlopen', return_value=self.mock) as urlopen: - for i in range(self.MAX_LENGTH - 1): - self.consumer.send('events', '"Event"') - assert not self.mock.called - - self.consumer.send('events', '"Last Event"') + for i in range(self.MAX_LENGTH - 1): + self.consumer.send('events', '"Event"') + assert len(self.log) == 0 - assert urlopen.call_count == 1 - ((request,), _) = urlopen.call_args - assert request.get_full_url() == 'https://api.mixpanel.com/track' - assert qs(request.data) == \ - qs('ip=0&data=WyJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJFdmVudCIsIkV2ZW50IiwiRXZlbnQiLCJMYXN0IEV2ZW50Il0%3D&verbose=1') + self.consumer.send('events', '"Last Event"') + assert len(self.log) == 1 + assert self.log == [('events', [ + 'Event', 'Event', 
'Event', 'Event', 'Event', + 'Event', 'Event', 'Event', 'Event', 'Last Event', + ])] - def test_unknown_endpoint(self): + def test_unknown_endpoint_raises_on_send(self): + # Ensure the exception isn't hidden until a flush. with pytest.raises(mixpanel.MixpanelException): self.consumer.send('unknown', '1') @@ -394,17 +386,19 @@ def test_useful_reraise_in_flush_endpoint(self): error_mock = Mock() error_mock.read.return_value = six.b('{"status": 0, "error": "arbitrary error"}') broken_json = '{broken JSON' + consumer = mixpanel.BufferedConsumer(2) with patch('six.moves.urllib.request.urlopen', return_value=error_mock): - self.consumer.send('events', broken_json) + consumer.send('events', broken_json) with pytest.raises(mixpanel.MixpanelException) as excinfo: - self.consumer.flush() + consumer.flush() assert excinfo.value.message == '[%s]' % broken_json assert excinfo.value.endpoint == 'events' - def test_import_data_receives_api_key(self): - # Ensure BufferedConsumer.send accepts the API_KEY parameter needed for - # import_data; see #62. + def test_send_remembers_api_key(self): self.consumer.send('imports', '"Event"', api_key='MY_API_KEY') + assert len(self.log) == 0 + self.consumer.flush() + assert self.log == [('imports', ['Event'], 'MY_API_KEY')] class TestFunctional:
4.3
NVIDIA__NeMo-7124
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Installation instructions should better indicate mandatory steps to make tests pass (or reinstall.sh needs an update) **Is your feature request related to a problem? Please describe.** I wanted to setup a dev conda environment for NeMo, so I followed steps at https://github.com/NVIDIA/NeMo/tree/main#from-source Afterwards `pytest --cpu` was failing (before it could even run any test) with two errors: * `module 'nvidia' has no attribute 'dali'` * `No module named 'pynvml'` **Describe the solution you'd like** After manually installing both libraries with * pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda120 * pip install pynvml the tests were able to pass (`1420 passed, 304 skipped, 261 warnings`). Ideally these libraries would be installed automatically by the `reinstall.sh` script. </issue> <code> [start of README.rst] 1 2 |status| |documentation| |codeql| |license| |pypi| |pyversion| |downloads| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 .. |documentation| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 9 :alt: Documentation 10 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 11 12 .. |license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 13 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 14 :alt: NeMo core license and license for collections in this repo 15 16 .. |pypi| image:: https://badge.fury.io/py/nemo-toolkit.svg 17 :target: https://badge.fury.io/py/nemo-toolkit 18 :alt: Release version 19 20 .. 
|pyversion| image:: https://img.shields.io/pypi/pyversions/nemo-toolkit.svg 21 :target: https://badge.fury.io/py/nemo-toolkit 22 :alt: Python version 23 24 .. |downloads| image:: https://static.pepy.tech/personalized-badge/nemo-toolkit?period=total&units=international_system&left_color=grey&right_color=brightgreen&left_text=downloads 25 :target: https://pepy.tech/project/nemo-toolkit 26 :alt: PyPi total downloads 27 28 .. |codeql| image:: https://github.com/nvidia/nemo/actions/workflows/codeql.yml/badge.svg?branch=main&event=push 29 :target: https://github.com/nvidia/nemo/actions/workflows/codeql.yml 30 :alt: CodeQL 31 32 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 33 :target: https://github.com/psf/black 34 :alt: Code style: black 35 36 .. _main-readme: 37 38 **NVIDIA NeMo** 39 =============== 40 41 Introduction 42 ------------ 43 44 NVIDIA NeMo is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), 45 text-to-speech synthesis (TTS), large language models (LLMs), and 46 natural language processing (NLP). 47 The primary objective of NeMo is to help researchers from industry and academia to reuse prior work (code and pretrained models) 48 and make it easier to create new `conversational AI models <https://developer.nvidia.com/conversational-ai#started>`_. 49 50 All NeMo models are trained with `Lightning <https://github.com/Lightning-AI/lightning>`_ and 51 training is automatically scalable to 1000s of GPUs. 52 Additionally, NeMo Megatron LLM models can be trained up to 1 trillion parameters using tensor and pipeline model parallelism. 53 NeMo models can be optimized for inference and deployed for production use-cases with `NVIDIA Riva <https://developer.nvidia.com/riva>`_. 54 55 Getting started with NeMo is simple. 
56 State of the Art pretrained NeMo models are freely available on `HuggingFace Hub <https://huggingface.co/models?library=nemo&sort=downloads&search=nvidia>`_ and 57 `NVIDIA NGC <https://catalog.ngc.nvidia.com/models?query=nemo&orderBy=weightPopularDESC>`_. 58 These models can be used to transcribe audio, synthesize speech, or translate text in just a few lines of code. 59 60 We have extensive `tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_ that 61 can all be run on `Google Colab <https://colab.research.google.com>`_. 62 63 For advanced users that want to train NeMo models from scratch or finetune existing NeMo models 64 we have a full suite of `example scripts <https://github.com/NVIDIA/NeMo/tree/main/examples>`_ that support multi-GPU/multi-node training. 65 66 For scaling NeMo LLM training on Slurm clusters or public clouds, please see the `NVIDIA NeMo Megatron Launcher <https://github.com/NVIDIA/NeMo-Megatron-Launcher>`_. 67 The NM launcher has extensive recipes, scripts, utilities, and documentation for training NeMo LLMs and also has an `Autoconfigurator <https://github.com/NVIDIA/NeMo-Megatron-Launcher#53-using-autoconfigurator-to-find-the-optimal-configuration>`_ 68 which can be used to find the optimal model parallel configuration for training on a specific cluster. 69 70 Also see our `introductory video <https://www.youtube.com/embed/wBgpMf_KQVw>`_ for a high level overview of NeMo. 
71 72 Key Features 73 ------------ 74 75 * Speech processing 76 * `HuggingFace Space for Audio Transcription (File, Microphone and YouTube) <https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id>`_ 77 * `Automatic Speech Recognition (ASR) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html>`_ 78 * Supported ASR models: `<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html>`_ 79 * Jasper, QuartzNet, CitriNet, ContextNet 80 * Conformer-CTC, Conformer-Transducer, FastConformer-CTC, FastConformer-Transducer 81 * Squeezeformer-CTC and Squeezeformer-Transducer 82 * LSTM-Transducer (RNNT) and LSTM-CTC 83 * Supports the following decoders/losses: 84 * CTC 85 * Transducer/RNNT 86 * Hybrid Transducer/CTC 87 * NeMo Original `Multi-blank Transducers <https://arxiv.org/abs/2211.03541>`_ and `Token-and-Duration Transducers (TDT) <https://arxiv.org/abs/2304.06795>`_ 88 * Streaming/Buffered ASR (CTC/Transducer) - `Chunked Inference Examples <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference>`_ 89 * Cache-aware Streaming Conformer with multiple lookaheads - `<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html#cache-aware-streaming-conformer>`_ 90 * Beam Search decoding 91 * `Language Modelling for ASR (CTC and RNNT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html>`_: N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer 92 * `Support of long audios for Conformer with memory efficient local attention <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio>`_ 93 * `Speech Classification, Speech Command Recognition and Language Identification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html>`_: MatchboxNet (Command Recognition), AmberNet (LangID) 94 * `Voice activity 
Detection (VAD) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad>`_: MarbleNet 95 * ASR with VAD Inference - `Example <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad>`_ 96 * `Speaker Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html>`_: TitaNet, ECAPA_TDNN, SpeakerNet 97 * `Speaker Diarization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html>`_ 98 * Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet 99 * Neural Diarizer: MSDD (Multi-scale Diarization Decoder) 100 * `Speech Intent Detection and Slot Filling <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html>`_: Conformer-Transformer 101 * `Pretrained models on different languages. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_: English, Spanish, German, Russian, Chinese, French, Italian, Polish, ... 102 * `NGC collection of pre-trained speech processing models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_ 103 * Natural Language Processing 104 * `NeMo Megatron pre-training of Large Language Models <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/nemo_megatron/intro.html>`_ 105 * `Neural Machine Translation (NMT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/machine_translation/machine_translation.html>`_ 106 * `Punctuation and Capitalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html>`_ 107 * `Token classification (named entity recognition) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/token_classification.html>`_ 108 * `Text classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_classification.html>`_ 109 * `Joint Intent and Slot Classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/joint_intent_slot.html>`_ 110 * `Question answering <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/question_answering.html>`_ 111 * `GLUE benchmark <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/glue_benchmark.html>`_ 112 * `Information retrieval <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/information_retrieval.html>`_ 113 * `Entity Linking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/entity_linking.html>`_ 114 * `Dialogue State Tracking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/sgd_qa.html>`_ 115 * `Prompt Learning <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html>`_ 116 * `NGC collection of pre-trained NLP models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_nlp>`_ 117 * `Synthetic Tabular Data Generation <https://developer.nvidia.com/blog/generating-synthetic-data-with-transformers-a-solution-for-enterprise-data-challenges/>`_ 118 * Text-to-Speech Synthesis (TTS): 119 * `Documentation <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#>`_ 120 * Mel-Spectrogram generators: FastPitch, SSL FastPitch, Mixer-TTS/Mixer-TTS-X, RAD-TTS, Tacotron2 121 * Vocoders: HiFiGAN, UnivNet, WaveGlow 122 * End-to-End Models: VITS 123 * `Pre-trained Model Checkpoints in NVIDIA GPU Cloud (NGC) <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_tts>`_ 124 * `Tools <https://github.com/NVIDIA/NeMo/tree/stable/tools>`_ 125 * `Text Processing (text normalization and inverse text normalization) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html>`_ 126 * `CTC-Segmentation tool <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/ctc_segmentation.html>`_ 127 * `Speech Data Explorer <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/speech_data_explorer.html>`_: a dash-based tool for interactive exploration of ASR/TTS datasets 128 * `Speech Data Processor <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/tools/speech_data_processor.html>`_ 129 130 131 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 132 133 Requirements 134 ------------ 135 136 1) Python 3.9 or above 137 2) Pytorch 1.13.1 or above 138 3) NVIDIA GPU for training 139 140 Documentation 141 ------------- 142 143 .. |main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 144 :alt: Documentation Status 145 :scale: 100% 146 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 147 148 .. 
|stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 149 :alt: Documentation Status 150 :scale: 100% 151 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 152 153 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 154 | Version | Status | Description | 155 +=========+=============+==========================================================================================================================================+ 156 | Latest | |main| | `Documentation of the latest (i.e. main) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/>`_ | 157 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 158 | Stable | |stable| | `Documentation of the stable (i.e. most recent release) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/>`_ | 159 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 160 161 Tutorials 162 --------- 163 A great way to start with NeMo is by checking `one of our tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_. 164 165 Getting help with NeMo 166 ---------------------- 167 FAQ can be found on NeMo's `Discussions board <https://github.com/NVIDIA/NeMo/discussions>`_. You are welcome to ask questions or start discussions there. 168 169 170 Installation 171 ------------ 172 173 Conda 174 ~~~~~ 175 176 We recommend installing NeMo in a fresh Conda environment. 177 178 .. code-block:: bash 179 180 conda create --name nemo python==3.8.10 181 conda activate nemo 182 183 Install PyTorch using their `configurator <https://pytorch.org/get-started/locally/>`_. 
184 185 .. code-block:: bash 186 187 conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia 188 189 The command used to install PyTorch may depend on your system. Please use the configurator linked above to find the right command for your system. 190 191 Pip 192 ~~~ 193 Use this installation mode if you want the latest released version. 194 195 .. code-block:: bash 196 197 apt-get update && apt-get install -y libsndfile1 ffmpeg 198 pip install Cython 199 pip install nemo_toolkit['all'] 200 201 Depending on the shell used, you may need to use ``"nemo_toolkit[all]"`` instead in the above command. 202 203 Pip from source 204 ~~~~~~~~~~~~~~~ 205 Use this installation mode if you want the version from a particular GitHub branch (e.g main). 206 207 .. code-block:: bash 208 209 apt-get update && apt-get install -y libsndfile1 ffmpeg 210 pip install Cython 211 python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all] 212 213 214 From source 215 ~~~~~~~~~~~ 216 Use this installation mode if you are contributing to NeMo. 217 218 .. code-block:: bash 219 220 apt-get update && apt-get install -y libsndfile1 ffmpeg 221 git clone https://github.com/NVIDIA/NeMo 222 cd NeMo 223 ./reinstall.sh 224 225 If you only want the toolkit without additional conda-based dependencies, you may replace ``reinstall.sh`` 226 with ``pip install -e .`` when your PWD is the root of the NeMo repository. 227 228 RNNT 229 ~~~~ 230 Note that RNNT requires numba to be installed from conda. 231 232 .. code-block:: bash 233 234 conda remove numba 235 pip uninstall numba 236 conda install -c conda-forge numba 237 238 NeMo Megatron 239 ~~~~~~~~~~~~~ 240 NeMo Megatron training requires NVIDIA Apex to be installed. 241 Install it manually if not using the NVIDIA PyTorch container. 242 243 To install Apex, run 244 245 .. 
code-block:: bash 246 247 git clone https://github.com/NVIDIA/apex.git 248 cd apex 249 git checkout 57057e2fcf1c084c0fcc818f55c0ff6ea1b24ae2 250 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_layer_norm" --global-option="--distributed_adam" --global-option="--deprecated_fused_adam" ./ 251 252 It is highly recommended to use the NVIDIA PyTorch or NeMo container if having issues installing Apex or any other dependencies. 253 254 While installing Apex, it may raise an error if the CUDA version on your system does not match the CUDA version torch was compiled with. 255 This raise can be avoided by commenting it here: https://github.com/NVIDIA/apex/blob/master/setup.py#L32 256 257 cuda-nvprof is needed to install Apex. The version should match the CUDA version that you are using: 258 259 .. code-block:: bash 260 261 conda install -c nvidia cuda-nvprof=11.8 262 263 packaging is also needed: 264 265 .. code-block:: bash 266 267 pip install packaging 268 269 270 Transformer Engine 271 ~~~~~~~~~~~~~~~~~~ 272 NeMo Megatron GPT has been integrated with `NVIDIA Transformer Engine <https://github.com/NVIDIA/TransformerEngine>`_ 273 Transformer Engine enables FP8 training on NVIDIA Hopper GPUs. 274 `Install <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/installation.html>`_ it manually if not using the NVIDIA PyTorch container. 275 276 .. code-block:: bash 277 278 pip install --upgrade git+https://github.com/NVIDIA/TransformerEngine.git@stable 279 280 It is highly recommended to use the NVIDIA PyTorch or NeMo container if having issues installing Transformer Engine or any other dependencies. 281 282 Transformer Engine requires PyTorch to be built with CUDA 11.8. 283 284 285 Flash Attention 286 ~~~~~~~~~~~~~~~~~~~~ 287 Transformer Engine already supports Flash Attention for GPT models. 
If you want to use Flash Attention for non-causal models or use with attention bias (introduced from position encoding, e.g. Alibi), please install `flash-attn <https://github.com/HazyResearch/flash-attention>`_. 288 289 .. code-block:: bash 290 291 pip install flash-attn 292 pip install triton==2.0.0.dev20221202 293 294 NLP inference UI 295 ~~~~~~~~~~~~~~~~~~~~ 296 To launch the inference web UI server, please install the gradio `gradio <https://gradio.app/>`_. 297 298 .. code-block:: bash 299 300 pip install gradio==3.34.0 301 302 NeMo Text Processing 303 ~~~~~~~~~~~~~~~~~~~~ 304 NeMo Text Processing, specifically (Inverse) Text Normalization, is now a separate repository `https://github.com/NVIDIA/NeMo-text-processing <https://github.com/NVIDIA/NeMo-text-processing>`_. 305 306 Docker containers: 307 ~~~~~~~~~~~~~~~~~~ 308 We release NeMo containers alongside NeMo releases. For example, NeMo ``r1.19.0`` comes with container ``nemo:23.04``, you may find more details about released containers in `releases page <https://github.com/NVIDIA/NeMo/releases>`_. 309 310 To use built container, please run 311 312 .. code-block:: bash 313 314 docker pull nvcr.io/nvidia/nemo:23.04 315 316 To build a nemo container with Dockerfile from a branch, please run 317 318 .. code-block:: bash 319 320 DOCKER_BUILDKIT=1 docker build -f Dockerfile -t nemo:latest . 321 322 323 If you chose to work with main branch, we recommend using NVIDIA's PyTorch container version 23.06-py3 and then installing from GitHub. 324 325 .. code-block:: bash 326 327 docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 328 -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 329 stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:23.06-py3 330 331 Examples 332 -------- 333 334 Many examples can be found under the `"Examples" <https://github.com/NVIDIA/NeMo/tree/stable/examples>`_ folder. 335 336 337 Contributing 338 ------------ 339 340 We welcome community contributions! 
Please refer to the `CONTRIBUTING.md <https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md>`_ for the process. 
341 
342 Publications 
343 ------------ 
344 
345 We provide an ever-growing list of publications that utilize the NeMo framework. Please refer to `PUBLICATIONS.md <https://github.com/NVIDIA/NeMo/tree/stable/PUBLICATIONS.md>`_. We welcome the addition of your own articles to this list! 
346 
347 License 
348 ------- 
349 NeMo is under `Apache 2.0 license <https://github.com/NVIDIA/NeMo/blob/stable/LICENSE>`_. 
350 [end of README.rst] [start of nemo/utils/model_utils.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License.
14 15 import copy 16 import importlib 17 import os 18 from dataclasses import dataclass, is_dataclass 19 from enum import Enum 20 from functools import lru_cache 21 from pathlib import Path 22 from typing import List, Optional, Tuple, Union 23 24 import wrapt 25 26 from nemo.utils import AppState, logging 27 from nemo.utils.data_utils import resolve_cache_dir # imported for compatibility: model_utils.resolve_cache_dir() 28 from nemo.utils.data_utils import is_datastore_path 29 30 # TODO @blisc: Perhaps refactor instead of import guarding 31 32 _HAS_HYDRA = True 33 34 try: 35 from omegaconf import DictConfig, ListConfig, OmegaConf 36 from omegaconf import errors as omegaconf_errors 37 from packaging import version 38 except ModuleNotFoundError: 39 _HAS_HYDRA = False 40 41 42 _VAL_TEST_FASTPATH_KEY = 'ds_item' 43 44 45 class ArtifactPathType(Enum): 46 """ 47 ArtifactPathType refers to the type of the path that the artifact is located at. 48 49 LOCAL_PATH: A user local filepath that exists on the file system. 50 TAR_PATH: A (generally flattened) filepath that exists inside of an archive (that may have its own full path). 51 """ 52 53 LOCAL_PATH = 0 54 TAR_PATH = 1 55 56 57 @dataclass(init=False) 58 class ArtifactItem: 59 path: str 60 path_type: ArtifactPathType 61 hashed_path: Optional[str] = None 62 63 64 def resolve_dataset_name_from_cfg(cfg: 'DictConfig') -> Optional[str]: 65 """ 66 Parses items of the provided sub-config to find the first potential key that 67 resolves to an existing file or directory. 68 69 # Fast-path Resolution 70 In order to handle cases where we need to resolve items that are not paths, a fastpath 71 key can be provided as defined in the global `_VAL_TEST_FASTPATH_KEY`. 72 73 This key can be used in two ways : 74 75 ## _VAL_TEST_FASTPATH_KEY points to another key in the config 76 77 If this _VAL_TEST_FASTPATH_KEY points to another key in this config itself, 78 then we assume we want to loop through the values of that key. 
79 80 This allows for any key in the config to become a fastpath key. 81 82 Example: 83 validation_ds: 84 splits: "val" 85 ... 86 <_VAL_TEST_FASTPATH_KEY>: "splits" <-- this points to the key name "splits" 87 88 Then we can write the following when overriding in hydra: 89 ```python 90 python train_file.py ... \ 91 model.validation_ds.splits=[val1, val2, dev1, dev2] ... 92 ``` 93 94 ## _VAL_TEST_FASTPATH_KEY itself acts as the resolved key 95 96 If this _VAL_TEST_FASTPATH_KEY does not point to another key in the config, then 97 it is assumed that the items of this key itself are used for resolution. 98 99 Example: 100 validation_ds: 101 ... 102 <_VAL_TEST_FASTPATH_KEY>: "val" <-- this points to the key name "splits" 103 104 Then we can write the following when overriding in hydra: 105 ```python 106 python train_file.py ... \ 107 model.validation_ds.<_VAL_TEST_FASTPATH_KEY>=[val1, val2, dev1, dev2] ... 108 ``` 109 110 # IMPORTANT NOTE: 111 It <can> potentially mismatch if there exist more than 2 valid paths, and the 112 first path does *not* resolve the the path of the data file (but does resolve to 113 some other valid path). 114 115 To avoid this side-effect, place the data path as the first item on the config file. 116 117 Args: 118 cfg: DictConfig (Sub-config) that should be parsed. 119 120 Returns: 121 A str representing the `key` of the config which hosts the filepath(s), 122 or None in case path could not be resolved. 
123 """ 124 if _VAL_TEST_FASTPATH_KEY in cfg and cfg[_VAL_TEST_FASTPATH_KEY] is not None: 125 fastpath_key = cfg[_VAL_TEST_FASTPATH_KEY] 126 127 if isinstance(fastpath_key, str) and fastpath_key in cfg: 128 return cfg[fastpath_key] 129 else: 130 return _VAL_TEST_FASTPATH_KEY 131 132 for key, value in cfg.items(): 133 if type(value) in [list, tuple, ListConfig]: 134 # Count the number of valid paths in the list 135 values_are_paths = 0 136 for val_i in value: 137 val_i = str(val_i) 138 if os.path.exists(val_i) or os.path.isdir(val_i) or is_datastore_path(val_i): 139 values_are_paths += 1 140 else: 141 # reset counter and break inner loop 142 break 143 144 if values_are_paths == len(value): 145 return key 146 147 else: 148 if os.path.exists(str(value)) or os.path.isdir(str(value)) or is_datastore_path(str(value)): 149 return key 150 151 return None 152 153 154 def parse_dataset_as_name(name: str) -> str: 155 """ 156 Constructs a valid prefix-name from a provided file path. 157 158 Args: 159 name: str path to some valid data/manifest file or a python object that 160 will be used as a name for the data loader (via str() cast). 161 162 Returns: 163 str prefix used to identify uniquely this data/manifest file. 164 """ 165 if os.path.exists(str(name)) or os.path.isdir(str(name)) or is_datastore_path(str(name)): 166 name = Path(name).stem 167 else: 168 name = str(name) 169 170 # cleanup name 171 name = name.replace('-', '_') 172 173 if 'manifest' in name: 174 name = name.replace('manifest', '') 175 176 if 'dataset' in name: 177 name = name.replace('dataset', '') 178 179 # Test if the manifes/dataset name was simply `manifest.yaml` or `dataset.yaml`: Invalid names. 180 if name == '': 181 raise ValueError( 182 "Provided dataset / manifest filename was `manifest.json` or `dataset.json`.\n" 183 "Such a name is invalid, since multiple datasets/manifests can share the same name,\n" 184 "thereby overriding their results during logging. 
Please pick a more discriptive filename \n" 185 "for the provided dataset / manifest file." 186 ) 187 188 if '_' != name[-1]: 189 name = name + '_' 190 191 return name 192 193 194 def unique_names_check(name_list: Optional[List[str]]): 195 """ 196 Performs a uniqueness check on the name list resolved, so that it can warn users 197 about non-unique keys. 198 199 Args: 200 name_list: List of strings resolved for data loaders. 201 """ 202 if name_list is None: 203 return 204 205 # Name uniqueness checks 206 names = set() 207 for name in name_list: 208 if name in names: 209 logging.warning( 210 "Name resolution has found more than one data loader having the same name !\n" 211 "In such cases, logs will nor be properly generated. " 212 "Please rename the item to have unique names.\n" 213 f"Resolved name : {name}" 214 ) 215 else: 216 names.add(name) # we need just hash key check, value is just a placeholder 217 218 219 def resolve_validation_dataloaders(model: 'ModelPT'): 220 """ 221 Helper method that operates on the ModelPT class to automatically support 222 multiple dataloaders for the validation set. 223 224 It does so by first resolving the path to one/more data files via `resolve_dataset_name_from_cfg()`. 225 If this resolution fails, it assumes the data loader is prepared to manually support / not support 226 multiple data loaders and simply calls the appropriate setup method. 227 228 If resolution succeeds: 229 Checks if provided path is to a single file or a list of files. 230 If a single file is provided, simply tags that file as such and loads it via the setup method. 231 If multiple files are provided: 232 Inject a new manifest path at index "i" into the resolved key. 233 Calls the appropriate setup method to set the data loader. 234 Collects the initialized data loader in a list and preserves it. 235 Once all data loaders are processed, assigns the list of loaded loaders to the ModelPT. 
236 Finally assigns a list of unique names resolved from the file paths to the ModelPT. 237 238 Args: 239 model: ModelPT subclass, which requires >=1 Validation Dataloaders to be setup. 240 """ 241 if not _HAS_HYDRA: 242 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 243 exit(1) 244 cfg = copy.deepcopy(model._cfg) 245 dataloaders = [] 246 247 # process val_loss_idx 248 if 'val_dl_idx' in cfg.validation_ds: 249 cfg = OmegaConf.to_container(cfg) 250 val_dl_idx = cfg['validation_ds'].pop('val_dl_idx') 251 cfg = OmegaConf.create(cfg) 252 else: 253 val_dl_idx = 0 254 255 # Set val_loss_idx 256 model._val_dl_idx = val_dl_idx 257 258 ds_key = resolve_dataset_name_from_cfg(cfg.validation_ds) 259 260 if ds_key is None or val_dl_idx < 0: 261 logging.debug( 262 "Could not resolve file path from provided config - {}. " 263 "Disabling support for multi-dataloaders.".format(cfg.validation_ds) 264 ) 265 266 model.setup_validation_data(cfg.validation_ds) 267 return 268 269 ds_values = cfg.validation_ds[ds_key] 270 271 if isinstance(ds_values, (list, tuple, ListConfig)): 272 273 for ds_value in ds_values: 274 if isinstance(ds_value, (dict, DictConfig)): 275 # this is a nested dataset 276 cfg.validation_ds = ds_value 277 else: 278 cfg.validation_ds[ds_key] = ds_value 279 280 model.setup_validation_data(cfg.validation_ds) 281 dataloaders.append(model._validation_dl) 282 283 model._validation_dl = dataloaders 284 if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)): 285 # using the name of each of the nested dataset 286 model._validation_names = [ds.name for ds in ds_values] 287 else: 288 model._validation_names = [parse_dataset_as_name(ds) for ds in ds_values] 289 unique_names_check(name_list=model._validation_names) 290 return 291 292 else: 293 model.setup_validation_data(cfg.validation_ds) 294 model._validation_names = [parse_dataset_as_name(ds_values)] 295 unique_names_check(name_list=model._validation_names) 296 297 298 def 
resolve_test_dataloaders(model: 'ModelPT'): 299 """ 300 Helper method that operates on the ModelPT class to automatically support 301 multiple dataloaders for the test set. 302 303 It does so by first resolving the path to one/more data files via `resolve_dataset_name_from_cfg()`. 304 If this resolution fails, it assumes the data loader is prepared to manually support / not support 305 multiple data loaders and simply calls the appropriate setup method. 306 307 If resolution succeeds: 308 Checks if provided path is to a single file or a list of files. 309 If a single file is provided, simply tags that file as such and loads it via the setup method. 310 If multiple files are provided: 311 Inject a new manifest path at index "i" into the resolved key. 312 Calls the appropriate setup method to set the data loader. 313 Collects the initialized data loader in a list and preserves it. 314 Once all data loaders are processed, assigns the list of loaded loaders to the ModelPT. 315 Finally assigns a list of unique names resolved from the file paths to the ModelPT. 316 317 Args: 318 model: ModelPT subclass, which requires >=1 Test Dataloaders to be setup. 319 """ 320 if not _HAS_HYDRA: 321 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 322 exit(1) 323 cfg = copy.deepcopy(model._cfg) 324 dataloaders = [] 325 326 # process test_loss_idx 327 if 'test_dl_idx' in cfg.test_ds: 328 cfg = OmegaConf.to_container(cfg) 329 test_dl_idx = cfg['test_ds'].pop('test_dl_idx') 330 cfg = OmegaConf.create(cfg) 331 else: 332 test_dl_idx = 0 333 334 # Set val_loss_idx 335 model._test_dl_idx = test_dl_idx 336 337 ds_key = resolve_dataset_name_from_cfg(cfg.test_ds) 338 339 if ds_key is None: 340 logging.debug( 341 "Could not resolve file path from provided config - {}. 
" 342 "Disabling support for multi-dataloaders.".format(cfg.test_ds) 343 ) 344 345 model.setup_test_data(cfg.test_ds) 346 return 347 348 ds_values = cfg.test_ds[ds_key] 349 350 if isinstance(ds_values, (list, tuple, ListConfig)): 351 352 for ds_value in ds_values: 353 if isinstance(ds_value, (dict, DictConfig)): 354 # this is a nested dataset 355 cfg.test_ds = ds_value 356 else: 357 cfg.test_ds[ds_key] = ds_value 358 359 model.setup_test_data(cfg.test_ds) 360 dataloaders.append(model._test_dl) 361 362 model._test_dl = dataloaders 363 if len(ds_values) > 0 and isinstance(ds_values[0], (dict, DictConfig)): 364 # using the name of each of the nested dataset 365 model._test_names = [ds.name for ds in ds_values] 366 else: 367 model._test_names = [parse_dataset_as_name(ds) for ds in ds_values] 368 369 unique_names_check(name_list=model._test_names) 370 return 371 372 else: 373 model.setup_test_data(cfg.test_ds) 374 model._test_names = [parse_dataset_as_name(ds_values)] 375 376 unique_names_check(name_list=model._test_names) 377 378 379 @wrapt.decorator 380 def wrap_training_step(wrapped, instance: 'pl.LightningModule', args, kwargs): 381 output_dict = wrapped(*args, **kwargs) 382 383 if isinstance(output_dict, dict) and output_dict is not None and 'log' in output_dict: 384 log_dict = output_dict.pop('log') 385 instance.log_dict(log_dict, on_step=True) 386 387 return output_dict 388 389 390 def convert_model_config_to_dict_config(cfg: Union['DictConfig', 'NemoConfig']) -> 'DictConfig': 391 """ 392 Converts its input into a standard DictConfig. 393 Possible input values are: 394 - DictConfig 395 - A dataclass which is a subclass of NemoConfig 396 397 Args: 398 cfg: A dict-like object. 
399 400 Returns: 401 The equivalent DictConfig 402 """ 403 if not _HAS_HYDRA: 404 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 405 exit(1) 406 if not isinstance(cfg, (OmegaConf, DictConfig)) and is_dataclass(cfg): 407 cfg = OmegaConf.structured(cfg) 408 409 if not isinstance(cfg, DictConfig): 410 raise ValueError(f"cfg constructor argument must be of type DictConfig/dict but got {type(cfg)} instead.") 411 412 config = OmegaConf.to_container(cfg, resolve=True) 413 config = OmegaConf.create(config) 414 return config 415 416 417 def _convert_config(cfg: 'OmegaConf'): 418 """ Recursive function convertint the configuration from old hydra format to the new one. """ 419 if not _HAS_HYDRA: 420 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 421 exit(1) 422 423 # Get rid of cls -> _target_. 424 if 'cls' in cfg and '_target_' not in cfg: 425 cfg._target_ = cfg.pop('cls') 426 427 # Get rid of params. 428 if 'params' in cfg: 429 params = cfg.pop('params') 430 for param_key, param_val in params.items(): 431 cfg[param_key] = param_val 432 433 # Recursion. 434 try: 435 for _, sub_cfg in cfg.items(): 436 if isinstance(sub_cfg, DictConfig): 437 _convert_config(sub_cfg) 438 except omegaconf_errors.OmegaConfBaseException as e: 439 logging.warning(f"Skipped conversion for config/subconfig:\n{cfg}\n Reason: {e}.") 440 441 442 def maybe_update_config_version(cfg: 'DictConfig'): 443 """ 444 Recursively convert Hydra 0.x configs to Hydra 1.x configs. 445 446 Changes include: 447 - `cls` -> `_target_`. 448 - `params` -> drop params and shift all arguments to parent. 449 - `target` -> `_target_` cannot be performed due to ModelPT injecting `target` inside class. 450 451 Args: 452 cfg: Any Hydra compatible DictConfig 453 454 Returns: 455 An updated DictConfig that conforms to Hydra 1.x format. 
456 """ 457 if not _HAS_HYDRA: 458 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 459 exit(1) 460 if cfg is not None and not isinstance(cfg, DictConfig): 461 try: 462 temp_cfg = OmegaConf.create(cfg) 463 cfg = temp_cfg 464 except omegaconf_errors.OmegaConfBaseException: 465 # Cannot be cast to DictConfig, skip updating. 466 return cfg 467 468 # Make a copy of model config. 469 cfg = copy.deepcopy(cfg) 470 OmegaConf.set_struct(cfg, False) 471 472 # Convert config. 473 _convert_config(cfg) 474 475 # Update model config. 476 OmegaConf.set_struct(cfg, True) 477 478 return cfg 479 480 481 @lru_cache(maxsize=1024) 482 def import_class_by_path(path: str): 483 """ 484 Recursive import of class by path string. 485 """ 486 paths = path.split('.') 487 path = ".".join(paths[:-1]) 488 class_name = paths[-1] 489 mod = __import__(path, fromlist=[class_name]) 490 mod = getattr(mod, class_name) 491 return mod 492 493 494 def resolve_subclass_pretrained_model_info(base_class) -> List['PretrainedModelInfo']: 495 """ 496 Recursively traverses the inheritance graph of subclasses to extract all pretrained model info. 497 First constructs a set of unique pretrained model info by performing DFS over the inheritance graph. 498 All model info belonging to the same class is added together. 499 500 Args: 501 base_class: The root class, whose subclass graph will be traversed. 502 503 Returns: 504 A list of unique pretrained model infos belonging to all of the inherited subclasses of 505 this baseclass. 
506 """ 507 list_of_models = set() 508 509 def recursive_subclass_walk(cls): 510 for subclass in cls.__subclasses__(): 511 # step into its immediate subclass 512 recursive_subclass_walk(subclass) 513 514 subclass_models = subclass.list_available_models() 515 516 if subclass_models is not None and len(subclass_models) > 0: 517 # Inject subclass info into pretrained model info 518 # if not already overriden by subclass 519 for model_info in subclass_models: 520 # If subclass manually injects class_, dont override. 521 if model_info.class_ is None: 522 model_info.class_ = subclass 523 524 for model_info in subclass_models: 525 list_of_models.add(model_info) 526 527 recursive_subclass_walk(base_class) 528 529 list_of_models = list(sorted(list_of_models)) 530 return list_of_models 531 532 533 def check_lib_version(lib_name: str, checked_version: str, operator) -> Tuple[Optional[bool], str]: 534 """ 535 Checks if a library is installed, and if it is, checks the operator(lib.__version__, checked_version) as a result. 536 This bool result along with a string analysis of result is returned. 537 538 If the library is not installed at all, then returns None instead, along with a string explaining 539 that the library is not installed 540 541 Args: 542 lib_name: lower case str name of the library that must be imported. 543 checked_version: semver string that is compared against lib.__version__. 544 operator: binary callable function func(a, b) -> bool; that compares lib.__version__ against version in 545 some manner. Must return a boolean. 546 547 Returns: 548 A tuple of results: 549 - Bool or None. Bool if the library could be imported, and the result of 550 operator(lib.__version__, checked_version) or False if __version__ is not implemented in lib. 551 None is passed if the library is not installed at all. 552 - A string analysis of the check. 553 """ 554 try: 555 if '.' 
in lib_name: 556 mod = import_class_by_path(lib_name) 557 else: 558 mod = importlib.import_module(lib_name) 559 560 if hasattr(mod, '__version__'): 561 lib_ver = version.Version(mod.__version__) 562 match_ver = version.Version(checked_version) 563 564 if operator(lib_ver, match_ver): 565 msg = f"Lib {lib_name} version is satisfied !" 566 return True, msg 567 else: 568 msg = ( 569 f"Lib {lib_name} version ({lib_ver}) is not {operator.__name__} than required version {checked_version}.\n" 570 f"Please upgrade the lib using either pip or conda to the latest version." 571 ) 572 return False, msg 573 else: 574 msg = ( 575 f"Lib {lib_name} does not implement __version__ in its init file. " 576 f"Could not check version compatibility." 577 ) 578 return False, msg 579 except (ImportError, ModuleNotFoundError): 580 pass 581 582 msg = f"Lib {lib_name} has not been installed. Please use pip or conda to install this package." 583 return None, msg 584 585 586 def uninject_model_parallel_rank(filepath): 587 filepath = str(filepath) 588 if 'mp_rank' in filepath or 'tp_rank' in filepath: 589 dirname = os.path.dirname(os.path.dirname(filepath)) 590 basename = os.path.basename(filepath) 591 filepath = os.path.join(dirname, basename) 592 return filepath 593 else: 594 return filepath 595 596 597 def inject_model_parallel_rank(filepath): 598 """ 599 Injects tensor/pipeline model parallel ranks into the filepath. 600 Does nothing if not using model parallelism. 
601 """ 602 # first make sure filepath does not have rank 603 filepath = uninject_model_parallel_rank(filepath) 604 605 app_state = AppState() 606 if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1: 607 # filepath needs to be updated to include mp_rank 608 dirname = os.path.dirname(filepath) 609 basename = os.path.basename(filepath) 610 if app_state.pipeline_model_parallel_size is None or app_state.pipeline_model_parallel_size == 1: 611 filepath = f'{dirname}/mp_rank_{app_state.tensor_model_parallel_rank:02d}/{basename}' 612 else: 613 filepath = f'{dirname}/tp_rank_{app_state.tensor_model_parallel_rank:02d}_pp_rank_{app_state.pipeline_model_parallel_rank:03d}/{basename}' 614 return filepath 615 else: 616 return filepath 617 [end of nemo/utils/model_utils.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. 
<patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
fcfc0ebb23b428a9bee6d847d1e0b37ca0784ba5
Installation instructions should better indicate mandatory steps to make tests pass (or reinstall.sh needs an update) **Is your feature request related to a problem? Please describe.** I wanted to setup a dev conda environment for NeMo, so I followed steps at https://github.com/NVIDIA/NeMo/tree/main#from-source Afterwards `pytest --cpu` was failing (before it could even run any test) with two errors: * `module 'nvidia' has no attribute 'dali'` * `No module named 'pynvml'` **Describe the solution you'd like** After manually installing both libraries with * pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda120 * pip install pynvml the tests were able to pass (`1420 passed, 304 skipped, 261 warnings`). Ideally these libraries would be installed automatically by the `reinstall.sh` script.
These libraries cannot be installed automatically due to this dependence on on extra index for distribution. But these tests and the dali support itself should be import guarded. Do you have a stack trace of which tests failed ? Sure, here's the stack trace: ```shell > pytest --cpu A valid `test_data.tar.gz` test archive (10445891B) found in the `/home/odelalleau/src/NeMo/tests/.data` folder. Setting numba compat : True =============================================================================================== test session starts =============================================================================================== platform linux -- Python 3.8.17, pytest-7.4.0, pluggy-1.2.0 -- /home/odelalleau/miniconda3/envs/py38-tmp/bin/python cachedir: .pytest_cache rootdir: /home/odelalleau/src/NeMo configfile: pyproject.toml testpaths: tests plugins: hydra-core-1.2.0 collected 1694 items / 2 errors / 2 skipped ===================================================================================================== ERRORS ====================================================================================================== ___________________________________________________________________________ ERROR collecting tests/collections/asr/test_asr_datasets.py ___________________________________________________________________________ ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/runner.py:341: in from_call result: Optional[TResult] = func() ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/runner.py:372: in <lambda> call = CallInfo.from_call(lambda: list(collector.collect()), "collect") ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/python.py:531: in collect self._inject_setup_module_fixture() ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/python.py:545: in _inject_setup_module_fixture self.obj, ("setUpModule", "setup_module") 
../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/python.py:310: in obj self._obj = obj = self._getobj() ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/python.py:528: in _getobj return self._importtestmodule() ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/python.py:617: in _importtestmodule mod = import_path(self.path, mode=importmode, root=self.config.rootpath) ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/pathlib.py:565: in import_path importlib.import_module(module_name) ../../miniconda3/envs/py38-tmp/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) <frozen importlib._bootstrap>:1014: in _gcd_import ??? <frozen importlib._bootstrap>:991: in _find_and_load ??? <frozen importlib._bootstrap>:975: in _find_and_load_unlocked ??? <frozen importlib._bootstrap>:671: in _load_unlocked ??? ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/_pytest/assertion/rewrite.py:178: in exec_module exec(co, module.__dict__) tests/collections/asr/test_asr_datasets.py:58: in <module> HAVE_DALI = is_dali_supported(__DALI_MINIMUM_VERSION__) nemo/collections/asr/data/audio_to_text_dali.py:68: in is_dali_supported module_available, _ = model_utils.check_lib_version( nemo/utils/model_utils.py:556: in check_lib_version mod = import_class_by_path(lib_name) nemo/utils/model_utils.py:490: in import_class_by_path mod = getattr(mod, class_name) E AttributeError: module 'nvidia' has no attribute 'dali' _________________________________________________________________________ ERROR collecting tests/collections/nlp/test_flash_attention.py __________________________________________________________________________ ImportError while importing test module '/home/odelalleau/src/NeMo/tests/collections/nlp/test_flash_attention.py'. Hint: make sure your test modules/packages have valid Python names. 
Traceback: ../../miniconda3/envs/py38-tmp/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) tests/collections/nlp/test_flash_attention.py:47: in <module> import pynvml E ModuleNotFoundError: No module named 'pynvml' ================================================================================================ warnings summary ================================================================================================= ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torchmetrics/utilities/imports.py:24 ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torchmetrics/utilities/imports.py:24 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torchmetrics/utilities/imports.py:24: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. _PYTHON_LOWER_3_8 = LooseVersion(_PYTHON_VERSION) < LooseVersion("3.8") ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torch/utils/tensorboard/__init__.py:4 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torch/utils/tensorboard/__init__.py:4: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if not hasattr(tensorboard, "__version__") or LooseVersion( ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torch/utils/tensorboard/__init__.py:6 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/torch/utils/tensorboard/__init__.py:6: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. ) < LooseVersion("1.15"): ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/jupyter_client/connect.py:20 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/jupyter_client/connect.py:20: DeprecationWarning: Jupyter is migrating its paths to use standard platformdirs given by the platformdirs library. 
To remove this warning and see the appropriate new directories, set the environment variable `JUPYTER_PLATFORM_DIRS=1` and then run `jupyter --paths`. The use of platformdirs will be the default in `jupyter_core` v6 from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir, secure_write ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/faiss/loader.py:28 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/faiss/loader.py:28: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. if LooseVersion(numpy.__version__) >= "1.19": ../../miniconda3/envs/py38-tmp/lib/python3.8/site-packages/setuptools/_distutils/version.py:346 /home/odelalleau/miniconda3/envs/py38-tmp/lib/python3.8/site-packages/setuptools/_distutils/version.py:346: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. other = LooseVersion(other) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ============================================================================================= short test summary info ============================================================================================= ERROR tests/collections/asr/test_asr_datasets.py - AttributeError: module 'nvidia' has no attribute 'dali' ERROR tests/collections/nlp/test_flash_attention.py !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Interrupted: 2 errors during collection !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ==================================================================================== 2 skipped, 7 warnings, 2 errors in 1.88s ===================================================================================== ``` Odd - its imported guarded right here - https://github.com/NVIDIA/NeMo/blob/main/tests/collections/asr/test_asr_datasets.py#L57-L60 Ah ok its Attribute error for some reason. Interesting. 
Let me add a patch
2023-07-28T19:34:30Z
<patch> diff --git a/nemo/utils/model_utils.py b/nemo/utils/model_utils.py --- a/nemo/utils/model_utils.py +++ b/nemo/utils/model_utils.py @@ -576,7 +576,7 @@ def check_lib_version(lib_name: str, checked_version: str, operator) -> Tuple[Op f"Could not check version compatibility." ) return False, msg - except (ImportError, ModuleNotFoundError): + except (ImportError, ModuleNotFoundError, AttributeError): pass msg = f"Lib {lib_name} has not been installed. Please use pip or conda to install this package." </patch>
diff --git a/tests/collections/nlp/test_flash_attention.py b/tests/collections/nlp/test_flash_attention.py --- a/tests/collections/nlp/test_flash_attention.py +++ b/tests/collections/nlp/test_flash_attention.py @@ -44,16 +44,23 @@ except (ImportError, ModuleNotFoundError): HAVE_TRITON = False -import pynvml +try: + import pynvml + + HAVE_PYNVML = True +except (ImportError, ModuleNotFoundError): + HAVE_PYNVML = False def HAVE_AMPERE_GPU(): - pynvml.nvmlInit() - handle = pynvml.nvmlDeviceGetHandleByIndex(0) - device_arch = pynvml.nvmlDeviceGetArchitecture(handle) - pynvml.nvmlShutdown() - return device_arch == pynvml.NVML_DEVICE_ARCH_AMPERE - + if HAVE_PYNVML: + pynvml.nvmlInit() + handle = pynvml.nvmlDeviceGetHandleByIndex(0) + device_arch = pynvml.nvmlDeviceGetArchitecture(handle) + pynvml.nvmlShutdown() + return device_arch == pynvml.NVML_DEVICE_ARCH_AMPERE + else: + return False @pytest.mark.run_only_on('GPU') @pytest.mark.skipif(not HAVE_APEX, reason="apex is not installed")
1.0
slackapi__python-slack-events-api-34
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for request signing ### Description Request signing went live and we should add support into our SDKs. https://api.slack.com/docs/verifying-requests-from-slack ### What type of issue is this? (place an `x` in one of the `[ ]`) - [ ] bug - [x] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue. </issue> <code> [start of README.rst] 1 Slack Events API adapter for Python 2 =================================== 3 4 .. image:: https://travis-ci.org/slackapi/python-slack-events-api.svg?branch=master 5 :target: https://travis-ci.org/slackapi/python-slack-events-api 6 .. image:: https://codecov.io/gh/slackapi/python-slack-events-api/branch/master/graph/badge.svg 7 :target: https://codecov.io/gh/slackapi/python-slack-events-api 8 9 10 The Slack Events Adapter is a Python-based solution to receive and parse events 11 from Slack’s Events API. This library uses an event emitter framework to allow 12 you to easily process Slack events by simply attaching functions 13 to event listeners. 14 15 This adapter enhances and simplifies Slack's Events API by incorporating useful best practices, patterns, and opportunities to abstract out common tasks. 16 17 πŸ’‘ We wrote a `blog post which explains how`_ the Events API can help you, why we built these tools, and how you can use them to build production-ready Slack apps. 18 19 .. 
_blog post which explains how: https://medium.com/@SlackAPI/enhancing-slacks-events-api-7535827829ab 20 21 22 πŸ€– Installation 23 ---------------- 24 25 .. code:: shell 26 27 pip install slackeventsapi 28 29 πŸ€– App Setup 30 -------------------- 31 32 Before you can use the `Events API`_ you must 33 `create a Slack App`_, and turn on 34 `Event Subscriptions`_. 35 36 πŸ’‘ When you add the Request URL to your app's Event Subscription settings, 37 Slack will send a request containing a `challenge` code to verify that your 38 server is alive. This package handles that URL Verification event for you, so 39 all you need to do is start the example app, start ngrok and configure your 40 URL accordingly. 41 42 βœ… Once you have your `Request URL` verified, your app is ready to start 43 receiving Team Events. 44 45 πŸ”‘ Your server will begin receiving Events from Slack's Events API as soon as a 46 user has authorized your app. 47 48 πŸ€– Development workflow: 49 =========================== 50 51 (1) Create a Slack app on https://api.slack.com/apps/ 52 (2) Add a `bot user` for your app 53 (3) Start the example app on your **Request URL** endpoint 54 (4) Start ngrok and copy the **HTTPS** URL 55 (5) Add your **Request URL** and subscribe your app to events 56 (6) Go to your ngrok URL (e.g. https://myapp12.ngrok.com/) and auth your app 57 58 **πŸŽ‰ Once your app has been authorized, you will begin receiving Slack Events** 59 60 ⚠️ Ngrok is a great tool for developing Slack apps, but we don't recommend using ngrok 61 for production apps. 62 63 πŸ€– Usage 64 ---------- 65 **⚠️ Keep your app's credentials safe!** 66 67 - For development, keep them in virtualenv variables. 68 69 - For production, use a secure data store. 70 71 - Never post your app's credentials to github. 72 73 .. 
code:: python 74 75 SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"] 76 77 Create a Slack Event Adapter for receiving actions via the Events API 78 ----------------------------------------------------------------------- 79 **Using the built-in Flask server:** 80 81 .. code:: python 82 83 from slackeventsapi import SlackEventAdapter 84 85 86 slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN, endpoint="/slack/events") 87 88 89 # Create an event listener for "reaction_added" events and print the emoji name 90 @slack_events_adapter.on("reaction_added") 91 def reaction_added(event): 92 emoji = event.get("reaction") 93 print(emoji) 94 95 96 # Start the server on port 3000 97 slack_events_adapter.start(port=3000) 98 99 100 **Using your existing Flask instance:** 101 102 103 .. code:: python 104 105 from flask import Flask 106 from slackeventsapi import SlackEventAdapter 107 108 109 # This `app` represents your existing Flask app 110 app = Flask(__name__) 111 112 113 # An example of one of your Flask app's routes 114 @app.route("/") 115 def hello(): 116 return "Hello there!" 117 118 119 # Bind the Events API route to your existing Flask app by passing the server 120 # instance as the last param, or with `server=app`. 121 slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN, "/slack/events", app) 122 123 124 # Create an event listener for "reaction_added" events and print the emoji name 125 @slack_events_adapter.on("reaction_added") 126 def reaction_added(event): 127 emoji = event.get("reaction") 128 print(emoji) 129 130 131 # Start the server on port 3000 132 if __name__ == "__main__": 133 app.run(port=3000) 134 135 For a comprehensive list of available Slack `Events` and more information on 136 `Scopes`, see https://api.slack.com/events-api 137 138 πŸ€– Example event listeners 139 ----------------------------- 140 141 See `example.py`_ for usage examples. This example also utilizes the 142 SlackClient Web API client. 
143 144 .. _example.py: /example/ 145 146 πŸ€” Support 147 ----------- 148 149 Need help? Join `Bot Developer Hangout`_ and talk to us in `#slack-api`_. 150 151 You can also `create an Issue`_ right here on GitHub. 152 153 .. _Events API: https://api.slack.com/events-api 154 .. _create a Slack App: https://api.slack.com/apps/new 155 .. _Event Subscriptions: https://api.slack.com/events-api#subscriptions 156 .. _Bot Developer Hangout: http://dev4slack.xoxco.com/ 157 .. _#slack-api: https://dev4slack.slack.com/messages/slack-api/ 158 .. _create an Issue: https://github.com/slackapi/python-slack-events-api/issues/new 159 [end of README.rst] [start of example/example.py] 1 from slackeventsapi import SlackEventAdapter 2 from slackclient import SlackClient 3 import os 4 5 # Our app's Slack Event Adapter for receiving actions via the Events API 6 SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"] 7 slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN, "/slack/events") 8 9 # Create a SlackClient for your bot to use for Web API requests 10 SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"] 11 CLIENT = SlackClient(SLACK_BOT_TOKEN) 12 13 # Example responder to greetings 14 @slack_events_adapter.on("message") 15 def handle_message(event_data): 16 message = event_data["event"] 17 # If the incoming message contains "hi", then respond with a "Hello" message 18 if message.get("subtype") is None and "hi" in message.get('text'): 19 channel = message["channel"] 20 message = "Hello <@%s>! 
:tada:" % message["user"] 21 CLIENT.api_call("chat.postMessage", channel=channel, text=message) 22 23 24 # Example reaction emoji echo 25 @slack_events_adapter.on("reaction_added") 26 def reaction_added(event_data): 27 event = event_data["event"] 28 emoji = event["reaction"] 29 channel = event["item"]["channel"] 30 text = ":%s:" % emoji 31 CLIENT.api_call("chat.postMessage", channel=channel, text=text) 32 33 # Once we have our event listeners configured, we can start the 34 # Flask server with the default `/events` endpoint on port 3000 35 slack_events_adapter.start(port=3000) 36 [end of example/example.py] [start of slackeventsapi/__init__.py] 1 from pyee import EventEmitter 2 from .server import SlackServer 3 4 5 class SlackEventAdapter(EventEmitter): 6 # Initialize the Slack event server 7 # If no endpoint is provided, default to listening on '/slack/events' 8 def __init__(self, verification_token, endpoint="/slack/events", server=None): 9 EventEmitter.__init__(self) 10 self.verification_token = verification_token 11 self.server = SlackServer(verification_token, endpoint, self, server) 12 13 def start(self, host='127.0.0.1', port=None, debug=False, **kwargs): 14 """ 15 Start the built in webserver, bound to the host and port you'd like. 16 Default host is `127.0.0.1` and port 8080. 
17 18 :param host: The host you want to bind the build in webserver to 19 :param port: The port number you want the webserver to run on 20 :param debug: Set to `True` to enable debug level logging 21 :param kwargs: Additional arguments you'd like to pass to Flask 22 """ 23 self.server.run(host=host, port=port, debug=debug, **kwargs) 24 [end of slackeventsapi/__init__.py] [start of slackeventsapi/server.py] 1 from flask import Flask, request, make_response 2 import json 3 import platform 4 import sys 5 from .version import __version__ 6 7 8 class SlackServer(Flask): 9 def __init__(self, verification_token, endpoint, emitter, server): 10 self.verification_token = verification_token 11 self.emitter = emitter 12 self.endpoint = endpoint 13 self.package_info = self.get_package_info() 14 15 # If a server is passed in, bind the event handler routes to it, 16 # otherwise create a new Flask instance. 17 if server: 18 if isinstance(server, Flask): 19 self.bind_route(server) 20 else: 21 raise TypeError("Server must be an instance of Flask") 22 else: 23 Flask.__init__(self, __name__) 24 self.bind_route(self) 25 26 def get_package_info(self): 27 client_name = __name__.split('.')[0] 28 client_version = __version__ # Version is returned from version.py 29 30 # Collect the package info, Python version and OS version. 31 package_info = { 32 "client": "{0}/{1}".format(client_name, client_version), 33 "python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info), 34 "system": "{0}/{1}".format(platform.system(), platform.release()) 35 } 36 37 # Concatenate and format the user-agent string to be passed into request headers 38 ua_string = [] 39 for key, val in package_info.items(): 40 ua_string.append(val) 41 42 return " ".join(ua_string) 43 44 def bind_route(self, server): 45 @server.route(self.endpoint, methods=['GET', 'POST']) 46 def event(): 47 # If a GET request is made, return 404. 
48 if request.method == 'GET': 49 return make_response("These are not the slackbots you're looking for.", 404) 50 51 # Parse the request payload into JSON 52 event_data = json.loads(request.data.decode('utf-8')) 53 54 # Echo the URL verification challenge code 55 if "challenge" in event_data: 56 return make_response( 57 event_data.get("challenge"), 200, {"content_type": "application/json"} 58 ) 59 60 # Verify the request token 61 request_token = event_data.get("token") 62 if self.verification_token != request_token: 63 self.emitter.emit('error', Exception('invalid verification token')) 64 return make_response("Request contains invalid Slack verification token", 403) 65 66 # Parse the Event payload and emit the event to the event listener 67 if "event" in event_data: 68 event_type = event_data["event"]["type"] 69 self.emitter.emit(event_type, event_data) 70 response = make_response("", 200) 71 response.headers['X-Slack-Powered-By'] = self.package_info 72 return response 73 [end of slackeventsapi/server.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. 
<patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
slackapi/python-slack-events-api
79949e66cc442b241eaed08ffe79d8dc7a166638
Add support for request signing ### Description Request signing went live and we should add support into our SDKs. https://api.slack.com/docs/verifying-requests-from-slack ### What type of issue is this? (place an `x` in one of the `[ ]`) - [ ] bug - [x] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue.
someone has a branch somewhere with this implemented, i hear πŸ‘‚ we'll get some more details into here asap.
2018-08-08T18:22:04Z
<patch> diff --git a/example/example.py b/example/example.py --- a/example/example.py +++ b/example/example.py @@ -3,12 +3,12 @@ import os # Our app's Slack Event Adapter for receiving actions via the Events API -SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"] -slack_events_adapter = SlackEventAdapter(SLACK_VERIFICATION_TOKEN, "/slack/events") +slack_signing_secret = os.environ["SLACK_SIGNING_SECRET"] +slack_events_adapter = SlackEventAdapter(slack_signing_secret, "/slack/events") # Create a SlackClient for your bot to use for Web API requests -SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"] -CLIENT = SlackClient(SLACK_BOT_TOKEN) +slack_bot_token = os.environ["SLACK_BOT_TOKEN"] +slack_client = SlackClient(slack_bot_token) # Example responder to greetings @slack_events_adapter.on("message") @@ -18,7 +18,7 @@ def handle_message(event_data): if message.get("subtype") is None and "hi" in message.get('text'): channel = message["channel"] message = "Hello <@%s>! :tada:" % message["user"] - CLIENT.api_call("chat.postMessage", channel=channel, text=message) + slack_client.api_call("chat.postMessage", channel=channel, text=message) # Example reaction emoji echo @@ -28,7 +28,12 @@ def reaction_added(event_data): emoji = event["reaction"] channel = event["item"]["channel"] text = ":%s:" % emoji - CLIENT.api_call("chat.postMessage", channel=channel, text=text) + slack_client.api_call("chat.postMessage", channel=channel, text=text) + +# Error events +@slack_events_adapter.on("error") +def error_handler(err): + print("ERROR: " + str(err)) # Once we have our event listeners configured, we can start the # Flask server with the default `/events` endpoint on port 3000 diff --git a/slackeventsapi/__init__.py b/slackeventsapi/__init__.py --- a/slackeventsapi/__init__.py +++ b/slackeventsapi/__init__.py @@ -5,10 +5,10 @@ class SlackEventAdapter(EventEmitter): # Initialize the Slack event server # If no endpoint is provided, default to listening on '/slack/events' - 
def __init__(self, verification_token, endpoint="/slack/events", server=None): + def __init__(self, signing_secret, endpoint="/slack/events", server=None, **kwargs): EventEmitter.__init__(self) - self.verification_token = verification_token - self.server = SlackServer(verification_token, endpoint, self, server) + self.signing_secret = signing_secret + self.server = SlackServer(signing_secret, endpoint, self, server, **kwargs) def start(self, host='127.0.0.1', port=None, debug=False, **kwargs): """ diff --git a/slackeventsapi/server.py b/slackeventsapi/server.py --- a/slackeventsapi/server.py +++ b/slackeventsapi/server.py @@ -2,12 +2,15 @@ import json import platform import sys +import hmac +import hashlib +from time import time from .version import __version__ class SlackServer(Flask): - def __init__(self, verification_token, endpoint, emitter, server): - self.verification_token = verification_token + def __init__(self, signing_secret, endpoint, emitter, server): + self.signing_secret = signing_secret self.emitter = emitter self.endpoint = endpoint self.package_info = self.get_package_info() @@ -41,6 +44,44 @@ def get_package_info(self): return " ".join(ua_string) + def verify_signature(self, timestamp, signature): + # Verify the request signature of the request sent from Slack + # Generate a new hash using the app's signing secret and request data + + # Compare the generated hash and incoming request signature + # Python 2.7.6 doesn't support compare_digest + # It's recommended to use Python 2.7.7+ + # noqa See https://docs.python.org/2/whatsnew/2.7.html#pep-466-network-security-enhancements-for-python-2-7 + if hasattr(hmac, "compare_digest"): + req = str.encode('v0:' + str(timestamp) + ':') + request.data + request_hash = 'v0=' + hmac.new( + str.encode(self.signing_secret), + req, hashlib.sha256 + ).hexdigest() + # Compare byte strings for Python 2 + if (sys.version_info[0] == 2): + return hmac.compare_digest(bytes(request_hash), bytes(signature)) + else: + 
return hmac.compare_digest(request_hash, signature) + else: + # So, we'll compare the signatures explicitly + req = str.encode('v0:' + str(timestamp) + ':') + request.data + request_hash = 'v0=' + hmac.new( + str.encode(self.signing_secret), + req, hashlib.sha256 + ).hexdigest() + + if len(request_hash) != len(signature): + return False + result = 0 + if isinstance(request_hash, bytes) and isinstance(signature, bytes): + for x, y in zip(request_hash, signature): + result |= x ^ y + else: + for x, y in zip(request_hash, signature): + result |= ord(x) ^ ord(y) + return result == 0 + def bind_route(self, server): @server.route(self.endpoint, methods=['GET', 'POST']) def event(): @@ -48,21 +89,31 @@ def event(): if request.method == 'GET': return make_response("These are not the slackbots you're looking for.", 404) + # Each request comes with request timestamp and request signature + # emit an error if the timestamp is out of range + req_timestamp = request.headers.get('X-Slack-Request-Timestamp') + if abs(time() - int(req_timestamp)) > 60 * 5: + slack_exception = SlackEventAdapterException('Invalid request timestamp') + self.emitter.emit('error', slack_exception) + return make_response("", 403) + + # Verify the request signature using the app's signing secret + # emit an error if the signature can't be verified + req_signature = request.headers.get('X-Slack-Signature') + if not self.verify_signature(req_timestamp, req_signature): + slack_exception = SlackEventAdapterException('Invalid request signature') + self.emitter.emit('error', slack_exception) + return make_response("", 403) + # Parse the request payload into JSON event_data = json.loads(request.data.decode('utf-8')) - # Echo the URL verification challenge code + # Echo the URL verification challenge code back to Slack if "challenge" in event_data: return make_response( event_data.get("challenge"), 200, {"content_type": "application/json"} ) - # Verify the request token - request_token = event_data.get("token") 
- if self.verification_token != request_token: - self.emitter.emit('error', Exception('invalid verification token')) - return make_response("Request contains invalid Slack verification token", 403) - # Parse the Event payload and emit the event to the event listener if "event" in event_data: event_type = event_data["event"]["type"] @@ -70,3 +121,14 @@ def event(): response = make_response("", 200) response.headers['X-Slack-Powered-By'] = self.package_info return response + + +class SlackEventAdapterException(Exception): + """ + Base exception for all errors raised by the SlackClient library + """ + def __init__(self, msg=None): + if msg is None: + # default error message + msg = "An error occurred in the SlackEventsApiAdapter library" + super(SlackEventAdapterException, self).__init__(msg) </patch>
diff --git a/tests/conftest.py b/tests/conftest.py --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,19 @@ -import pytest import json +import hashlib +import hmac +import pytest from slackeventsapi import SlackEventAdapter +def create_signature(secret, timestamp, data): + req = str.encode('v0:' + str(timestamp) + ':') + str.encode(data) + request_signature= 'v0='+hmac.new( + str.encode(secret), + req, hashlib.sha256 + ).hexdigest() + return request_signature + + def load_event_fixture(event, as_string=True): filename = "tests/data/{}.json".format(event) with open(filename) as json_data: @@ -23,12 +34,14 @@ def pytest_namespace(): return { 'reaction_event_fixture': load_event_fixture('reaction_added'), 'url_challenge_fixture': load_event_fixture('url_challenge'), - 'bad_token_fixture': event_with_bad_token() + 'bad_token_fixture': event_with_bad_token(), + 'create_signature': create_signature } @pytest.fixture def app(): - adapter = SlackEventAdapter("vFO9LARnLI7GflLR8tGqHgdy") + adapter = SlackEventAdapter("SIGNING_SECRET") app = adapter.server + app.testing = True return app diff --git a/tests/test_events.py b/tests/test_events.py --- a/tests/test_events.py +++ b/tests/test_events.py @@ -1,21 +1,27 @@ +import time import pytest from slackeventsapi import SlackEventAdapter -ADAPTER = SlackEventAdapter('vFO9LARnLI7GflLR8tGqHgdy') - +ADAPTER = SlackEventAdapter('SIGNING_SECRET') def test_event_emission(client): # Events should trigger an event - data = pytest.reaction_event_fixture - @ADAPTER.on('reaction_added') def event_handler(event): assert event["reaction"] == 'grinning' + data = pytest.reaction_event_fixture + timestamp = int(time.time()) + signature = pytest.create_signature(ADAPTER.signing_secret, timestamp, data) + res = client.post( '/slack/events', data=data, - content_type='application/json' + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } ) assert res.status_code == 
200 diff --git a/tests/test_server.py b/tests/test_server.py --- a/tests/test_server.py +++ b/tests/test_server.py @@ -2,20 +2,23 @@ from flask import Flask import pytest import sys +import hmac +import time from slackeventsapi import SlackEventAdapter +from slackeventsapi.server import SlackEventAdapterException from slackeventsapi.version import __version__ def test_existing_flask(): valid_flask = Flask(__name__) - valid_adapter = SlackEventAdapter("vFO9LARnLI7GflLR8tGqHgdy", "/slack/events", valid_flask) + valid_adapter = SlackEventAdapter("SIGNING_SECRET", "/slack/events", valid_flask) assert isinstance(valid_adapter, SlackEventAdapter) def test_server_not_flask(): with pytest.raises(TypeError) as e: invalid_flask = "I am not a Flask" - SlackEventAdapter("vFO9LARnLI7GflLR8tGqHgdy", "/slack/events", invalid_flask) + SlackEventAdapter("SIGNING_SECRET", "/slack/events", invalid_flask) assert e.value.args[0] == 'Server must be an instance of Flask' @@ -26,33 +29,110 @@ def test_event_endpoint_get(client): def test_url_challenge(client): + slack_adapter = SlackEventAdapter("SIGNING_SECRET") data = pytest.url_challenge_fixture + timestamp = int(time.time()) + signature = pytest.create_signature(slack_adapter.signing_secret, timestamp, data) + res = client.post( '/slack/events', data=data, - content_type='application/json') + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } + ) assert res.status_code == 200 assert bytes.decode(res.data) == "valid_challenge_token" -def test_valid_event_request(client): +def test_invalid_request_signature(client): + # Verify [package metadata header is set + slack_adapter = SlackEventAdapter("SIGNING_SECRET") + + data = pytest.reaction_event_fixture + timestamp = int(time.time()) + signature = "bad signature" + + with pytest.raises(SlackEventAdapterException) as excinfo: + res = client.post( + '/slack/events', + data=data, + content_type='application/json', + 
headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } + ) + + assert str(excinfo.value) == 'Invalid request signature' + + +def test_invalid_request_timestamp(client): + # Verify [package metadata header is set + slack_adapter = SlackEventAdapter("SIGNING_SECRET") + + data = pytest.reaction_event_fixture + timestamp = int(time.time()+1000) + signature = "bad timestamp" + + with pytest.raises(SlackEventAdapterException) as excinfo: + res = client.post( + '/slack/events', + data=data, + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } + ) + + assert str(excinfo.value) == 'Invalid request timestamp' + + +def test_compare_digest_fallback(client, monkeypatch): + # Verify [package metadata header is set + slack_adapter = SlackEventAdapter("SIGNING_SECRET") + + if hasattr(hmac, "compare_digest"): + monkeypatch.delattr(hmac, 'compare_digest') + data = pytest.reaction_event_fixture + timestamp = int(time.time()) + signature =pytest.create_signature(slack_adapter.signing_secret, timestamp, data) + res = client.post( '/slack/events', data=data, - content_type='application/json') + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } + ) + assert res.status_code == 200 def test_version_header(client): # Verify [package metadata header is set - package_info = SlackEventAdapter("token").server.package_info + slack_adapter = SlackEventAdapter("SIGNING_SECRET") + package_info = slack_adapter.server.package_info data = pytest.reaction_event_fixture + timestamp = int(time.time()) + signature = pytest.create_signature(slack_adapter.signing_secret, timestamp, data) + res = client.post( '/slack/events', data=data, - content_type='application/json') + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, + 'X-Slack-Signature': signature + } + ) assert res.status_code == 200 
assert res.headers["X-Slack-Powered-By"] == package_info @@ -60,7 +140,14 @@ def test_version_header(client): def test_server_start(mocker): # Verify server started with correct params - slack_events_adapter = SlackEventAdapter("token") + slack_events_adapter = SlackEventAdapter("SIGNING_SECRET") mocker.spy(slack_events_adapter, 'server') slack_events_adapter.start(port=3000) slack_events_adapter.server.run.assert_called_once_with(debug=False, host='127.0.0.1', port=3000) + + +def test_default_exception_msg(mocker): + with pytest.raises(SlackEventAdapterException) as excinfo: + raise SlackEventAdapterException + + assert str(excinfo.value) == 'An error occurred in the SlackEventsApiAdapter library'
1.0
celery__celery-2666
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Celerybeat runs periodic tasks every 5 seconds regardless of interval I recently upgraded to celery 3 and have been experiencing some strange behavior with celerybeat, which was acting normally before the upgrade. My environment: python 2.7.3 django 1.4.1 virtualenv django-celery 3.0.6 celery 3.0.6 Regardless of what interval I define for my periodic tasks - which I adjust through the django admin - celerybeat fires off the periodic tasks every 5 seconds. This occurs regardless of the interval. Interestingly, however, when the periodic task is set disabled, celerybeat stops sending tasks, so it must be responsive to the scheduler at some level. </issue> <code> [start of README.rst] 1 ================================= 2 celery - Distributed Task Queue 3 ================================= 4 5 .. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png 6 7 |build-status| |coverage-status| 8 9 :Version: 3.2.0a1 (Cipater) 10 :Web: http://celeryproject.org/ 11 :Download: http://pypi.python.org/pypi/celery/ 12 :Source: http://github.com/celery/celery/ 13 :Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis, 14 python, webhooks, queue, distributed 15 16 -- 17 18 What is a Task Queue? 19 ===================== 20 21 Task queues are used as a mechanism to distribute work across threads or 22 machines. 23 24 A task queue's input is a unit of work, called a task, dedicated worker 25 processes then constantly monitor the queue for new work to perform. 26 27 Celery communicates via messages, usually using a broker 28 to mediate between clients and workers. To initiate a task a client puts a 29 message on the queue, the broker then delivers the message to a worker. 30 31 A Celery system can consist of multiple workers and brokers, giving way 32 to high availability and horizontal scaling. 
33 34 Celery is a library written in Python, but the protocol can be implemented in 35 any language. So far there's RCelery_ for the Ruby programming language, and a 36 `PHP client`, but language interoperability can also be achieved 37 by using webhooks. 38 39 .. _RCelery: https://github.com/leapfrogonline/rcelery 40 .. _`PHP client`: https://github.com/gjedeer/celery-php 41 .. _`using webhooks`: 42 http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html 43 44 What do I need? 45 =============== 46 47 Celery version 3.0 runs on, 48 49 - Python (2.6, 2.7, 3.3, 3.4) 50 - PyPy (1.8, 1.9) 51 - Jython (2.5, 2.7). 52 53 This is the last version to support Python 2.5, 54 and from Celery 3.1, Python 2.6 or later is required. 55 The last version to support Python 2.4 was Celery series 2.2. 56 57 *Celery* is usually used with a message broker to send and receive messages. 58 The RabbitMQ, Redis transports are feature complete, 59 but there's also experimental support for a myriad of other solutions, including 60 using SQLite for local development. 61 62 *Celery* can run on a single machine, on multiple machines, or even 63 across datacenters. 64 65 Get Started 66 =========== 67 68 If this is the first time you're trying to use Celery, or you are 69 new to Celery 3.0 coming from previous versions then you should read our 70 getting started tutorials: 71 72 - `First steps with Celery`_ 73 74 Tutorial teaching you the bare minimum needed to get started with Celery. 75 76 - `Next steps`_ 77 78 A more complete overview, showing more features. 79 80 .. _`First steps with Celery`: 81 http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html 82 83 .. _`Next steps`: 84 http://docs.celeryproject.org/en/latest/getting-started/next-steps.html 85 86 Celery is... 87 ========== 88 89 - **Simple** 90 91 Celery is easy to use and maintain, and does *not need configuration files*. 
92 93 It has an active, friendly community you can talk to for support, 94 including a `mailing-list`_ and and an IRC channel. 95 96 Here's one of the simplest applications you can make:: 97 98 from celery import Celery 99 100 app = Celery('hello', broker='amqp://guest@localhost//') 101 102 @app.task 103 def hello(): 104 return 'hello world' 105 106 - **Highly Available** 107 108 Workers and clients will automatically retry in the event 109 of connection loss or failure, and some brokers support 110 HA in way of *Master/Master* or *Master/Slave* replication. 111 112 - **Fast** 113 114 A single Celery process can process millions of tasks a minute, 115 with sub-millisecond round-trip latency (using RabbitMQ, 116 py-librabbitmq, and optimized settings). 117 118 - **Flexible** 119 120 Almost every part of *Celery* can be extended or used on its own, 121 Custom pool implementations, serializers, compression schemes, logging, 122 schedulers, consumers, producers, autoscalers, broker transports and much more. 123 124 It supports... 125 ============ 126 127 - **Message Transports** 128 129 - RabbitMQ_, Redis_, 130 - MongoDB_ (experimental), Amazon SQS (experimental), 131 - CouchDB_ (experimental), SQLAlchemy_ (experimental), 132 - Django ORM (experimental), `IronMQ`_ 133 - and more... 134 135 - **Concurrency** 136 137 - Prefork, Eventlet_, gevent_, threads/single threaded 138 139 - **Result Stores** 140 141 - AMQP, Redis 142 - memcached, MongoDB 143 - SQLAlchemy, Django ORM 144 - Apache Cassandra, IronCache 145 146 - **Serialization** 147 148 - *pickle*, *json*, *yaml*, *msgpack*. 149 - *zlib*, *bzip2* compression. 150 - Cryptographic message signing. 151 152 .. _`Eventlet`: http://eventlet.net/ 153 .. _`gevent`: http://gevent.org/ 154 155 .. _RabbitMQ: http://rabbitmq.com 156 .. _Redis: http://redis.io 157 .. _MongoDB: http://mongodb.org 158 .. _Beanstalk: http://kr.github.com/beanstalkd 159 .. _CouchDB: http://couchdb.apache.org 160 .. 
_SQLAlchemy: http://sqlalchemy.org 161 .. _`IronMQ`: http://iron.io 162 163 Framework Integration 164 ===================== 165 166 Celery is easy to integrate with web frameworks, some of which even have 167 integration packages: 168 169 +--------------------+------------------------+ 170 | `Django`_ | not needed | 171 +--------------------+------------------------+ 172 | `Pyramid`_ | `pyramid_celery`_ | 173 +--------------------+------------------------+ 174 | `Pylons`_ | `celery-pylons`_ | 175 +--------------------+------------------------+ 176 | `Flask`_ | not needed | 177 +--------------------+------------------------+ 178 | `web2py`_ | `web2py-celery`_ | 179 +--------------------+------------------------+ 180 | `Tornado`_ | `tornado-celery`_ | 181 +--------------------+------------------------+ 182 183 The integration packages are not strictly necessary, but they can make 184 development easier, and sometimes they add important hooks like closing 185 database connections at ``fork``. 186 187 .. _`Django`: http://djangoproject.com/ 188 .. _`Pylons`: http://www.pylonsproject.org/ 189 .. _`Flask`: http://flask.pocoo.org/ 190 .. _`web2py`: http://web2py.com/ 191 .. _`Bottle`: http://bottlepy.org/ 192 .. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html 193 .. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/ 194 .. _`django-celery`: http://pypi.python.org/pypi/django-celery 195 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons 196 .. _`web2py-celery`: http://code.google.com/p/web2py-celery/ 197 .. _`Tornado`: http://www.tornadoweb.org/ 198 .. _`tornado-celery`: http://github.com/mher/tornado-celery/ 199 200 .. _celery-documentation: 201 202 Documentation 203 ============= 204 205 The `latest documentation`_ with user guides, tutorials and API reference 206 is hosted at Read The Docs. 207 208 .. _`latest documentation`: http://docs.celeryproject.org/en/latest/ 209 210 .. 
_celery-installation: 211 212 Installation 213 ============ 214 215 You can install Celery either via the Python Package Index (PyPI) 216 or from source. 217 218 To install using `pip`,:: 219 220 $ pip install -U Celery 221 222 To install using `easy_install`,:: 223 224 $ easy_install -U Celery 225 226 .. _bundles: 227 228 Bundles 229 ------- 230 231 Celery also defines a group of bundles that can be used 232 to install Celery and the dependencies for a given feature. 233 234 You can specify these in your requirements or on the ``pip`` comand-line 235 by using brackets. Multiple bundles can be specified by separating them by 236 commas. 237 :: 238 239 $ pip install "celery[librabbitmq]" 240 241 $ pip install "celery[librabbitmq,redis,auth,msgpack]" 242 243 The following bundles are available: 244 245 Serializers 246 ~~~~~~~~~~~ 247 248 :celery[auth]: 249 for using the auth serializer. 250 251 :celery[msgpack]: 252 for using the msgpack serializer. 253 254 :celery[yaml]: 255 for using the yaml serializer. 256 257 Concurrency 258 ~~~~~~~~~~~ 259 260 :celery[eventlet]: 261 for using the eventlet pool. 262 263 :celery[gevent]: 264 for using the gevent pool. 265 266 :celery[threads]: 267 for using the thread pool. 268 269 Transports and Backends 270 ~~~~~~~~~~~~~~~~~~~~~~~ 271 272 :celery[librabbitmq]: 273 for using the librabbitmq C library. 274 275 :celery[redis]: 276 for using Redis as a message transport or as a result backend. 277 278 :celery[mongodb]: 279 for using MongoDB as a message transport (*experimental*), 280 or as a result backend (*supported*). 281 282 :celery[sqs]: 283 for using Amazon SQS as a message transport (*experimental*). 284 285 :celery[memcache]: 286 for using memcached as a result backend. 287 288 :celery[cassandra]: 289 for using Apache Cassandra as a result backend. 290 291 :celery[couchdb]: 292 for using CouchDB as a message transport (*experimental*). 293 294 :celery[couchbase]: 295 for using CouchBase as a result backend. 
296 297 :celery[beanstalk]: 298 for using Beanstalk as a message transport (*experimental*). 299 300 :celery[zookeeper]: 301 for using Zookeeper as a message transport. 302 303 :celery[zeromq]: 304 for using ZeroMQ as a message transport (*experimental*). 305 306 :celery[sqlalchemy]: 307 for using SQLAlchemy as a message transport (*experimental*), 308 or as a result backend (*supported*). 309 310 :celery[pyro]: 311 for using the Pyro4 message transport (*experimental*). 312 313 :celery[slmq]: 314 for using the SoftLayer Message Queue transport (*experimental*). 315 316 .. _celery-installing-from-source: 317 318 Downloading and installing from source 319 -------------------------------------- 320 321 Download the latest version of Celery from 322 http://pypi.python.org/pypi/celery/ 323 324 You can install it by doing the following,:: 325 326 $ tar xvfz celery-0.0.0.tar.gz 327 $ cd celery-0.0.0 328 $ python setup.py build 329 # python setup.py install 330 331 The last command must be executed as a privileged user if 332 you are not currently using a virtualenv. 333 334 .. _celery-installing-from-git: 335 336 Using the development version 337 ----------------------------- 338 339 With pip 340 ~~~~~~~~ 341 342 The Celery development version also requires the development 343 versions of ``kombu``, ``amqp`` and ``billiard``. 344 345 You can install the latest snapshot of these using the following 346 pip commands:: 347 348 $ pip install https://github.com/celery/celery/zipball/master#egg=celery 349 $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard 350 $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp 351 $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu 352 353 With git 354 ~~~~~~~~ 355 356 Please the Contributing section. 357 358 .. _getting-help: 359 360 Getting Help 361 ============ 362 363 .. 
_mailing-list: 364 365 Mailing list 366 ------------ 367 368 For discussions about the usage, development, and future of celery, 369 please join the `celery-users`_ mailing list. 370 371 .. _`celery-users`: http://groups.google.com/group/celery-users/ 372 373 .. _irc-channel: 374 375 IRC 376 --- 377 378 Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ 379 network. 380 381 .. _`Freenode`: http://freenode.net 382 383 .. _bug-tracker: 384 385 Bug tracker 386 =========== 387 388 If you have any suggestions, bug reports or annoyances please report them 389 to our issue tracker at http://github.com/celery/celery/issues/ 390 391 .. _wiki: 392 393 Wiki 394 ==== 395 396 http://wiki.github.com/celery/celery/ 397 398 399 .. _maintainers: 400 401 Maintainers 402 =========== 403 404 - `@ask`_ (primary maintainer) 405 - `@thedrow`_ 406 - `@chrisgogreen`_ 407 - `@PMickael`_ 408 - `@malinoff`_ 409 - And you? We really need more: https://github.com/celery/celery/issues/2534 410 411 .. _`@ask`: http://github.com/ask 412 .. _`@thedrow`: http://github.com/thedrow 413 .. _`@chrisgogreen`: http://github.com/chrisgogreen 414 .. _`@PMickael`: http://github.com/PMickael 415 .. _`@malinoff`: http://github.com/malinoff 416 417 418 .. _contributing-short: 419 420 Contributing 421 ============ 422 423 Development of `celery` happens at Github: http://github.com/celery/celery 424 425 You are highly encouraged to participate in the development 426 of `celery`. If you don't like Github (for some reason) you're welcome 427 to send regular patches. 428 429 Be sure to also read the `Contributing to Celery`_ section in the 430 documentation. 431 432 .. _`Contributing to Celery`: 433 http://docs.celeryproject.org/en/master/contributing.html 434 435 .. _license: 436 437 License 438 ======= 439 440 This software is licensed under the `New BSD License`. See the ``LICENSE`` 441 file in the top distribution directory for the full license text. 442 443 .. 
# vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround 444 445 446 .. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png 447 :alt: Bitdeli badge 448 :target: https://bitdeli.com/free 449 450 .. |build-status| image:: https://travis-ci.org/celery/celery.svg?branch=master 451 :target: https://travis-ci.org/celery/celery 452 .. |coverage-status| image:: https://coveralls.io/repos/celery/celery/badge.svg 453 :target: https://coveralls.io/r/celery/celery 454 [end of README.rst] [start of celery/schedules.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.schedules 4 ~~~~~~~~~~~~~~~~ 5 6 Schedules define the intervals at which periodic tasks 7 should run. 8 9 """ 10 from __future__ import absolute_import 11 12 import numbers 13 import re 14 15 from collections import namedtuple 16 from datetime import datetime, timedelta 17 18 from kombu.utils import cached_property 19 20 from . import current_app 21 from .five import range, string_t 22 from .utils import is_iterable 23 from .utils.timeutils import ( 24 weekday, maybe_timedelta, remaining, humanize_seconds, 25 timezone, maybe_make_aware, ffwd, localize 26 ) 27 from .datastructures import AttributeDict 28 29 __all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser', 30 'maybe_schedule', 'solar'] 31 32 schedstate = namedtuple('schedstate', ('is_due', 'next')) 33 34 35 CRON_PATTERN_INVALID = """\ 36 Invalid crontab pattern. Valid range is {min}-{max}. \ 37 '{value}' was found.\ 38 """ 39 40 CRON_INVALID_TYPE = """\ 41 Argument cronspec needs to be of any of the following types: \ 42 int, str, or an iterable type. 
{type!r} was given.\ 43 """ 44 45 CRON_REPR = """\ 46 <crontab: {0._orig_minute} {0._orig_hour} {0._orig_day_of_week} \ 47 {0._orig_day_of_month} {0._orig_month_of_year} (m/h/d/dM/MY)>\ 48 """ 49 50 SOLAR_INVALID_LATITUDE = """\ 51 Argument latitude {lat} is invalid, must be between -90 and 90.\ 52 """ 53 54 SOLAR_INVALID_LONGITUDE = """\ 55 Argument longitude {lon} is invalid, must be between -180 and 180.\ 56 """ 57 58 SOLAR_INVALID_EVENT = """\ 59 Argument event "{event}" is invalid, must be one of {all_events}.\ 60 """ 61 62 63 def cronfield(s): 64 return '*' if s is None else s 65 66 67 class ParseException(Exception): 68 """Raised by crontab_parser when the input can't be parsed.""" 69 70 71 class schedule(object): 72 """Schedule for periodic task. 73 74 :param run_every: Interval in seconds (or a :class:`~datetime.timedelta`). 75 :param relative: If set to True the run time will be rounded to the 76 resolution of the interval. 77 :param nowfun: Function returning the current date and time 78 (class:`~datetime.datetime`). 79 :param app: Celery app instance. 80 81 """ 82 relative = False 83 84 def __init__(self, run_every=None, relative=False, nowfun=None, app=None): 85 self.run_every = maybe_timedelta(run_every) 86 self.relative = relative 87 self.nowfun = nowfun 88 self._app = app 89 90 def now(self): 91 return (self.nowfun or self.app.now)() 92 93 def remaining_estimate(self, last_run_at): 94 return remaining( 95 self.maybe_make_aware(last_run_at), self.run_every, 96 self.maybe_make_aware(self.now()), self.relative, 97 ) 98 99 def is_due(self, last_run_at): 100 """Returns tuple of two items `(is_due, next_time_to_check)`, 101 where next time to check is in seconds. 102 103 e.g. 104 105 * `(True, 20)`, means the task should be run now, and the next 106 time to check is in 20 seconds. 107 108 * `(False, 12.3)`, means the task is not due, but that the scheduler 109 should check again in 12.3 seconds. 
110 111 The next time to check is used to save energy/cpu cycles, 112 it does not need to be accurate but will influence the precision 113 of your schedule. You must also keep in mind 114 the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, 115 which decides the maximum number of seconds the scheduler can 116 sleep between re-checking the periodic task intervals. So if you 117 have a task that changes schedule at runtime then your next_run_at 118 check will decide how long it will take before a change to the 119 schedule takes effect. The max loop interval takes precendence 120 over the next check at value returned. 121 122 .. admonition:: Scheduler max interval variance 123 124 The default max loop interval may vary for different schedulers. 125 For the default scheduler the value is 5 minutes, but for e.g. 126 the django-celery database scheduler the value is 5 seconds. 127 128 """ 129 last_run_at = self.maybe_make_aware(last_run_at) 130 rem_delta = self.remaining_estimate(last_run_at) 131 remaining_s = max(rem_delta.total_seconds(), 0) 132 if remaining_s == 0: 133 return schedstate(is_due=True, next=self.seconds) 134 return schedstate(is_due=False, next=remaining_s) 135 136 def maybe_make_aware(self, dt): 137 if self.utc_enabled: 138 return maybe_make_aware(dt, self.tz) 139 return dt 140 141 def __repr__(self): 142 return '<freq: {0.human_seconds}>'.format(self) 143 144 def __eq__(self, other): 145 if isinstance(other, schedule): 146 return self.run_every == other.run_every 147 return self.run_every == other 148 149 def __ne__(self, other): 150 return not self.__eq__(other) 151 152 def __reduce__(self): 153 return self.__class__, (self.run_every, self.relative, self.nowfun) 154 155 @property 156 def seconds(self): 157 return max(self.run_every.total_seconds(), 0) 158 159 @property 160 def human_seconds(self): 161 return humanize_seconds(self.seconds) 162 163 @property 164 def app(self): 165 return self._app or current_app._get_current_object() 166 167 
@app.setter # noqa 168 def app(self, app): 169 self._app = app 170 171 @cached_property 172 def tz(self): 173 return self.app.timezone 174 175 @cached_property 176 def utc_enabled(self): 177 return self.app.conf.CELERY_ENABLE_UTC 178 179 def to_local(self, dt): 180 if not self.utc_enabled: 181 return timezone.to_local_fallback(dt) 182 return dt 183 184 185 class crontab_parser(object): 186 """Parser for crontab expressions. Any expression of the form 'groups' 187 (see BNF grammar below) is accepted and expanded to a set of numbers. 188 These numbers represent the units of time that the crontab needs to 189 run on:: 190 191 digit :: '0'..'9' 192 dow :: 'a'..'z' 193 number :: digit+ | dow+ 194 steps :: number 195 range :: number ( '-' number ) ? 196 numspec :: '*' | range 197 expr :: numspec ( '/' steps ) ? 198 groups :: expr ( ',' expr ) * 199 200 The parser is a general purpose one, useful for parsing hours, minutes and 201 day_of_week expressions. Example usage:: 202 203 >>> minutes = crontab_parser(60).parse('*/15') 204 [0, 15, 30, 45] 205 >>> hours = crontab_parser(24).parse('*/4') 206 [0, 4, 8, 12, 16, 20] 207 >>> day_of_week = crontab_parser(7).parse('*') 208 [0, 1, 2, 3, 4, 5, 6] 209 210 It can also parse day_of_month and month_of_year expressions if initialized 211 with an minimum of 1. Example usage:: 212 213 >>> days_of_month = crontab_parser(31, 1).parse('*/3') 214 [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31] 215 >>> months_of_year = crontab_parser(12, 1).parse('*/2') 216 [1, 3, 5, 7, 9, 11] 217 >>> months_of_year = crontab_parser(12, 1).parse('2-12/2') 218 [2, 4, 6, 8, 10, 12] 219 220 The maximum possible expanded value returned is found by the formula:: 221 222 max_ + min_ - 1 223 224 """ 225 ParseException = ParseException 226 227 _range = r'(\w+?)-(\w+)' 228 _steps = r'/(\w+)?' 
229 _star = r'\*' 230 231 def __init__(self, max_=60, min_=0): 232 self.max_ = max_ 233 self.min_ = min_ 234 self.pats = ( 235 (re.compile(self._range + self._steps), self._range_steps), 236 (re.compile(self._range), self._expand_range), 237 (re.compile(self._star + self._steps), self._star_steps), 238 (re.compile('^' + self._star + '$'), self._expand_star), 239 ) 240 241 def parse(self, spec): 242 acc = set() 243 for part in spec.split(','): 244 if not part: 245 raise self.ParseException('empty part') 246 acc |= set(self._parse_part(part)) 247 return acc 248 249 def _parse_part(self, part): 250 for regex, handler in self.pats: 251 m = regex.match(part) 252 if m: 253 return handler(m.groups()) 254 return self._expand_range((part, )) 255 256 def _expand_range(self, toks): 257 fr = self._expand_number(toks[0]) 258 if len(toks) > 1: 259 to = self._expand_number(toks[1]) 260 if to < fr: # Wrap around max_ if necessary 261 return (list(range(fr, self.min_ + self.max_)) + 262 list(range(self.min_, to + 1))) 263 return list(range(fr, to + 1)) 264 return [fr] 265 266 def _range_steps(self, toks): 267 if len(toks) != 3 or not toks[2]: 268 raise self.ParseException('empty filter') 269 return self._expand_range(toks[:2])[::int(toks[2])] 270 271 def _star_steps(self, toks): 272 if not toks or not toks[0]: 273 raise self.ParseException('empty filter') 274 return self._expand_star()[::int(toks[0])] 275 276 def _expand_star(self, *args): 277 return list(range(self.min_, self.max_ + self.min_)) 278 279 def _expand_number(self, s): 280 if isinstance(s, string_t) and s[0] == '-': 281 raise self.ParseException('negative numbers not supported') 282 try: 283 i = int(s) 284 except ValueError: 285 try: 286 i = weekday(s) 287 except KeyError: 288 raise ValueError('Invalid weekday literal {0!r}.'.format(s)) 289 290 max_val = self.min_ + self.max_ - 1 291 if i > max_val: 292 raise ValueError( 293 'Invalid end range: {0} > {1}.'.format(i, max_val)) 294 if i < self.min_: 295 raise ValueError( 
296 'Invalid beginning range: {0} < {1}.'.format(i, self.min_)) 297 298 return i 299 300 301 class crontab(schedule): 302 """A crontab can be used as the `run_every` value of a 303 :class:`PeriodicTask` to add cron-like scheduling. 304 305 Like a :manpage:`cron` job, you can specify units of time of when 306 you would like the task to execute. It is a reasonably complete 307 implementation of cron's features, so it should provide a fair 308 degree of scheduling needs. 309 310 You can specify a minute, an hour, a day of the week, a day of the 311 month, and/or a month in the year in any of the following formats: 312 313 .. attribute:: minute 314 315 - A (list of) integers from 0-59 that represent the minutes of 316 an hour of when execution should occur; or 317 - A string representing a crontab pattern. This may get pretty 318 advanced, like `minute='*/15'` (for every quarter) or 319 `minute='1,13,30-45,50-59/2'`. 320 321 .. attribute:: hour 322 323 - A (list of) integers from 0-23 that represent the hours of 324 a day of when execution should occur; or 325 - A string representing a crontab pattern. This may get pretty 326 advanced, like `hour='*/3'` (for every three hours) or 327 `hour='0,8-17/2'` (at midnight, and every two hours during 328 office hours). 329 330 .. attribute:: day_of_week 331 332 - A (list of) integers from 0-6, where Sunday = 0 and Saturday = 333 6, that represent the days of a week that execution should 334 occur. 335 - A string representing a crontab pattern. This may get pretty 336 advanced, like `day_of_week='mon-fri'` (for weekdays only). 337 (Beware that `day_of_week='*/2'` does not literally mean 338 'every two days', but 'every day that is divisible by two'!) 339 340 .. attribute:: day_of_month 341 342 - A (list of) integers from 1-31 that represents the days of the 343 month that execution should occur. 344 - A string representing a crontab pattern. 
This may get pretty 345 advanced, such as `day_of_month='2-30/3'` (for every even 346 numbered day) or `day_of_month='1-7,15-21'` (for the first and 347 third weeks of the month). 348 349 .. attribute:: month_of_year 350 351 - A (list of) integers from 1-12 that represents the months of 352 the year during which execution can occur. 353 - A string representing a crontab pattern. This may get pretty 354 advanced, such as `month_of_year='*/3'` (for the first month 355 of every quarter) or `month_of_year='2-12/2'` (for every even 356 numbered month). 357 358 .. attribute:: nowfun 359 360 Function returning the current date and time 361 (:class:`~datetime.datetime`). 362 363 .. attribute:: app 364 365 The Celery app instance. 366 367 It is important to realize that any day on which execution should 368 occur must be represented by entries in all three of the day and 369 month attributes. For example, if `day_of_week` is 0 and `day_of_month` 370 is every seventh day, only months that begin on Sunday and are also 371 in the `month_of_year` attribute will have execution events. Or, 372 `day_of_week` is 1 and `day_of_month` is '1-7,15-21' means every 373 first and third monday of every month present in `month_of_year`. 
374 375 """ 376 377 def __init__(self, minute='*', hour='*', day_of_week='*', 378 day_of_month='*', month_of_year='*', nowfun=None, app=None): 379 self._orig_minute = cronfield(minute) 380 self._orig_hour = cronfield(hour) 381 self._orig_day_of_week = cronfield(day_of_week) 382 self._orig_day_of_month = cronfield(day_of_month) 383 self._orig_month_of_year = cronfield(month_of_year) 384 self.hour = self._expand_cronspec(hour, 24) 385 self.minute = self._expand_cronspec(minute, 60) 386 self.day_of_week = self._expand_cronspec(day_of_week, 7) 387 self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) 388 self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) 389 self.nowfun = nowfun 390 self._app = app 391 392 @staticmethod 393 def _expand_cronspec(cronspec, max_, min_=0): 394 """Takes the given cronspec argument in one of the forms:: 395 396 int (like 7) 397 str (like '3-5,*/15', '*', or 'monday') 398 set (like {0,15,30,45} 399 list (like [8-17]) 400 401 And convert it to an (expanded) set representing all time unit 402 values on which the crontab triggers. Only in case of the base 403 type being 'str', parsing occurs. (It is fast and 404 happens only once for each crontab instance, so there is no 405 significant performance overhead involved.) 406 407 For the other base types, merely Python type conversions happen. 408 409 The argument `max_` is needed to determine the expansion of '*' 410 and ranges. 411 The argument `min_` is needed to determine the expansion of '*' 412 and ranges for 1-based cronspecs, such as day of month or month 413 of year. The default is sufficient for minute, hour, and day of 414 week. 
415 416 """ 417 if isinstance(cronspec, numbers.Integral): 418 result = {cronspec} 419 elif isinstance(cronspec, string_t): 420 result = crontab_parser(max_, min_).parse(cronspec) 421 elif isinstance(cronspec, set): 422 result = cronspec 423 elif is_iterable(cronspec): 424 result = set(cronspec) 425 else: 426 raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) 427 428 # assure the result does not preceed the min or exceed the max 429 for number in result: 430 if number >= max_ + min_ or number < min_: 431 raise ValueError(CRON_PATTERN_INVALID.format( 432 min=min_, max=max_ - 1 + min_, value=number)) 433 return result 434 435 def _delta_to_next(self, last_run_at, next_hour, next_minute): 436 """ 437 Takes a datetime of last run, next minute and hour, and 438 returns a relativedelta for the next scheduled day and time. 439 Only called when day_of_month and/or month_of_year cronspec 440 is specified to further limit scheduled task execution. 441 """ 442 from bisect import bisect, bisect_left 443 444 datedata = AttributeDict(year=last_run_at.year) 445 days_of_month = sorted(self.day_of_month) 446 months_of_year = sorted(self.month_of_year) 447 448 def day_out_of_range(year, month, day): 449 try: 450 datetime(year=year, month=month, day=day) 451 except ValueError: 452 return True 453 return False 454 455 def roll_over(): 456 while 1: 457 flag = (datedata.dom == len(days_of_month) or 458 day_out_of_range(datedata.year, 459 months_of_year[datedata.moy], 460 days_of_month[datedata.dom]) or 461 (self.maybe_make_aware(datetime(datedata.year, 462 months_of_year[datedata.moy], 463 days_of_month[datedata.dom])) < last_run_at)) 464 465 if flag: 466 datedata.dom = 0 467 datedata.moy += 1 468 if datedata.moy == len(months_of_year): 469 datedata.moy = 0 470 datedata.year += 1 471 else: 472 break 473 474 if last_run_at.month in self.month_of_year: 475 datedata.dom = bisect(days_of_month, last_run_at.day) 476 datedata.moy = bisect_left(months_of_year, last_run_at.month) 
477 else: 478 datedata.dom = 0 479 datedata.moy = bisect(months_of_year, last_run_at.month) 480 if datedata.moy == len(months_of_year): 481 datedata.moy = 0 482 roll_over() 483 484 while 1: 485 th = datetime(year=datedata.year, 486 month=months_of_year[datedata.moy], 487 day=days_of_month[datedata.dom]) 488 if th.isoweekday() % 7 in self.day_of_week: 489 break 490 datedata.dom += 1 491 roll_over() 492 493 return ffwd(year=datedata.year, 494 month=months_of_year[datedata.moy], 495 day=days_of_month[datedata.dom], 496 hour=next_hour, 497 minute=next_minute, 498 second=0, 499 microsecond=0) 500 501 def now(self): 502 return (self.nowfun or self.app.now)() 503 504 def __repr__(self): 505 return CRON_REPR.format(self) 506 507 def __reduce__(self): 508 return (self.__class__, (self._orig_minute, 509 self._orig_hour, 510 self._orig_day_of_week, 511 self._orig_day_of_month, 512 self._orig_month_of_year), None) 513 514 def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): 515 tz = tz or self.tz 516 last_run_at = self.maybe_make_aware(last_run_at) 517 now = self.maybe_make_aware(self.now()) 518 dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7 519 520 execute_this_date = (last_run_at.month in self.month_of_year and 521 last_run_at.day in self.day_of_month and 522 dow_num in self.day_of_week) 523 524 execute_this_hour = (execute_this_date and 525 last_run_at.day == now.day and 526 last_run_at.month == now.month and 527 last_run_at.year == now.year and 528 last_run_at.hour in self.hour and 529 last_run_at.minute < max(self.minute)) 530 531 if execute_this_hour: 532 next_minute = min(minute for minute in self.minute 533 if minute > last_run_at.minute) 534 delta = ffwd(minute=next_minute, second=0, microsecond=0) 535 else: 536 next_minute = min(self.minute) 537 execute_today = (execute_this_date and 538 last_run_at.hour < max(self.hour)) 539 540 if execute_today: 541 next_hour = min(hour for hour in self.hour 542 if hour > last_run_at.hour) 543 delta = 
ffwd(hour=next_hour, minute=next_minute, 544 second=0, microsecond=0) 545 else: 546 next_hour = min(self.hour) 547 all_dom_moy = (self._orig_day_of_month == '*' and 548 self._orig_month_of_year == '*') 549 if all_dom_moy: 550 next_day = min([day for day in self.day_of_week 551 if day > dow_num] or self.day_of_week) 552 add_week = next_day == dow_num 553 554 delta = ffwd(weeks=add_week and 1 or 0, 555 weekday=(next_day - 1) % 7, 556 hour=next_hour, 557 minute=next_minute, 558 second=0, 559 microsecond=0) 560 else: 561 delta = self._delta_to_next(last_run_at, 562 next_hour, next_minute) 563 return self.to_local(last_run_at), delta, self.to_local(now) 564 565 def remaining_estimate(self, last_run_at, ffwd=ffwd): 566 """Returns when the periodic task should run next as a timedelta.""" 567 return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) 568 569 def is_due(self, last_run_at): 570 """Returns tuple of two items `(is_due, next_time_to_run)`, 571 where next time to run is in seconds. 572 573 See :meth:`celery.schedules.schedule.is_due` for more information. 
574 575 """ 576 rem_delta = self.remaining_estimate(last_run_at) 577 rem = max(rem_delta.total_seconds(), 0) 578 due = rem == 0 579 if due: 580 rem_delta = self.remaining_estimate(self.now()) 581 rem = max(rem_delta.total_seconds(), 0) 582 return schedstate(due, rem) 583 584 def __eq__(self, other): 585 if isinstance(other, crontab): 586 return (other.month_of_year == self.month_of_year and 587 other.day_of_month == self.day_of_month and 588 other.day_of_week == self.day_of_week and 589 other.hour == self.hour and 590 other.minute == self.minute) 591 return NotImplemented 592 593 def __ne__(self, other): 594 return not self.__eq__(other) 595 596 597 def maybe_schedule(s, relative=False, app=None): 598 if s is not None: 599 if isinstance(s, numbers.Integral): 600 s = timedelta(seconds=s) 601 if isinstance(s, timedelta): 602 return schedule(s, relative, app=app) 603 else: 604 s.app = app 605 return s 606 607 608 class solar(schedule): 609 """A solar event can be used as the `run_every` value of a 610 :class:`PeriodicTask` to schedule based on certain solar events. 611 612 :param event: Solar event that triggers this task. Available 613 values are: dawn_astronomical, dawn_nautical, dawn_civil, 614 sunrise, solar_noon, sunset, dusk_civil, dusk_nautical, 615 dusk_astronomical 616 :param lat: The latitude of the observer. 617 :param lon: The longitude of the observer. 618 :param nowfun: Function returning the current date and time 619 (class:`~datetime.datetime`). 620 :param app: Celery app instance. 
621 """ 622 623 _all_events = [ 624 'dawn_astronomical', 625 'dawn_nautical', 626 'dawn_civil', 627 'sunrise', 628 'solar_noon', 629 'sunset', 630 'dusk_civil', 631 'dusk_nautical', 632 'dusk_astronomical', 633 ] 634 _horizons = { 635 'dawn_astronomical': '-18', 636 'dawn_nautical': '-12', 637 'dawn_civil': '-6', 638 'sunrise': '-0:34', 639 'solar_noon': '0', 640 'sunset': '-0:34', 641 'dusk_civil': '-6', 642 'dusk_nautical': '-12', 643 'dusk_astronomical': '18', 644 } 645 _methods = { 646 'dawn_astronomical': 'next_rising', 647 'dawn_nautical': 'next_rising', 648 'dawn_civil': 'next_rising', 649 'sunrise': 'next_rising', 650 'solar_noon': 'next_transit', 651 'sunset': 'next_setting', 652 'dusk_civil': 'next_setting', 653 'dusk_nautical': 'next_setting', 654 'dusk_astronomical': 'next_setting', 655 } 656 _use_center_l = { 657 'dawn_astronomical': True, 658 'dawn_nautical': True, 659 'dawn_civil': True, 660 'sunrise': False, 661 'solar_noon': True, 662 'sunset': False, 663 'dusk_civil': True, 664 'dusk_nautical': True, 665 'dusk_astronomical': True, 666 } 667 668 def __init__(self, event, lat, lon, nowfun=None, app=None): 669 self.ephem = __import__('ephem') 670 self.event = event 671 self.lat = lat 672 self.lon = lon 673 self.nowfun = nowfun 674 self._app = app 675 676 if event not in self._all_events: 677 raise ValueError(SOLAR_INVALID_EVENT.format( 678 event=event, all_events=', '.join(self._all_events), 679 )) 680 if lat < -90 or lat > 90: 681 raise ValueError(SOLAR_INVALID_LATITUDE.format(lat=lat)) 682 if lon < -180 or lon > 180: 683 raise ValueError(SOLAR_INVALID_LONGITUDE.format(lon=lon)) 684 685 cal = self.ephem.Observer() 686 cal.lat = str(lat) 687 cal.lon = str(lon) 688 cal.elev = 0 689 cal.horizon = self._horizons[event] 690 cal.pressure = 0 691 self.cal = cal 692 693 self.method = self._methods[event] 694 self.use_center = self._use_center_l[event] 695 696 def now(self): 697 return (self.nowfun or self.app.now)() 698 699 def __reduce__(self): 700 return 
(self.__class__, ( 701 self.event, self.lat, self.lon), None) 702 703 def __repr__(self): 704 return '<solar: {0} at latitude {1}, longitude: {2}>'.format( 705 self.event, self.lat, self.lon, 706 ) 707 708 def remaining_estimate(self, last_run_at): 709 """Returns when the periodic task should run next as a timedelta, 710 or if it shouldn't run today (e.g. the sun does not rise today), 711 returns the time when the next check should take place.""" 712 last_run_at = self.maybe_make_aware(last_run_at) 713 last_run_at_utc = localize(last_run_at, timezone.utc) 714 self.cal.date = last_run_at_utc 715 try: 716 next_utc = getattr(self.cal, self.method)( 717 self.ephem.Sun(), 718 start=last_run_at_utc, use_center=self.use_center, 719 ) 720 except self.ephem.CircumpolarError: 721 """Sun will not rise/set today. Check again tomorrow 722 (specifically, after the next anti-transit).""" 723 next_utc = ( 724 self.cal.next_antitransit(self.ephem.Sun()) + 725 timedelta(minutes=1) 726 ) 727 next = self.maybe_make_aware(next_utc.datetime()) 728 now = self.maybe_make_aware(self.now()) 729 delta = next - now 730 return delta 731 732 def is_due(self, last_run_at): 733 """Returns tuple of two items `(is_due, next_time_to_run)`, 734 where next time to run is in seconds. 735 736 See :meth:`celery.schedules.schedule.is_due` for more information. 737 738 """ 739 rem_delta = self.remaining_estimate(last_run_at) 740 rem = max(rem_delta.total_seconds(), 0) 741 due = rem == 0 742 if due: 743 rem_delta = self.remaining_estimate(self.now()) 744 rem = max(rem_delta.total_seconds(), 0) 745 return schedstate(due, rem) 746 747 def __eq__(self, other): 748 if isinstance(other, solar): 749 return (other.event == self.event and 750 other.lat == self.lat and 751 other.lon == self.lon) 752 return NotImplemented 753 754 def __ne__(self, other): 755 return not self.__eq__(other) 756 [end of celery/schedules.py] </code> Here is an example of a patch file. It consists of changes to the code base. 
It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
celery/celery
6bf4664e076c4d8b6d728190802124aa5c112c5d
Celerybeat runs periodic tasks every 5 seconds regardless of interval I recently upgraded to celery 3 and have been experiencing some strange behavior with celerybeat, which was acting normally before the upgrade. My environment: python 2.7.3 django 1.4.1 virtualenv django-celery 3.0.6 celery 3.0.6 Regardless of what interval I define for my periodic tasks - which I adjust through the django admin - celerybeat fires off the periodic tasks every 5 seconds. This occurs regardless of the interval. Interestingly, however, when the periodic task is set disabled, celerybeat stops sending tasks, so it must be responsive to the scheduler at some level.
Could you try upgrading to celery 3.0.7? Also please delete an existing `celerybeat-schedule` file if any. Thanks - Will try that and update later today. On Tue, Aug 28, 2012 at 12:02 PM, Ask Solem Hoel notifications@github.comwrote: > Could you try upgrading to celery 3.0.7? Also please delete an existing > celerybeat-schedule file if any. > > β€” > Reply to this email directly or view it on GitHubhttps://github.com/celery/celery/issues/943#issuecomment-8096911. I'm having the same problem on 3.0.7 (django-celery 3.0.6), redis broker. ``` from celery.task import periodic_task @periodic_task(run_every=crontab(hour="*", minute="0", day_of_week="*"), ignore_result=True) def my_test(): pass ``` Just to confirm - I have installed celery 3.0.7 now (although django-celery is still 3.0.6) and am still having the same issues. I am also using Redis as a broker. I have even dropped the DB, in case it was something caused by my south migrations, yet the problem still persists. Please let me know if you'd like me to try anything else. On Tue, Aug 28, 2012 at 9:38 PM, erikcw notifications@github.com wrote: > I'm having the same problem on 3.0.7 (django-celery 3.0.6), redis broker. > > from celery.task import periodic_task > > @periodic_task(run_every=crontab(hour="_", minute="0", day_of_week="_"), ignore_result=True) > def my_test(): > pass > > β€” > Reply to this email directly or view it on GitHubhttps://github.com/celery/celery/issues/943#issuecomment-8112944. I have upgraded to 3.0.8 and the problem still occurs. Issue should be reopened. Note, anyone using the database scheduler have to reset the `last_run_at` fields for each periodic task: ``` $ python manage.py shell >>> from djcelery.models import PeriodicTask >>> PeriodicTask.objects.update(last_run_at=None) ``` I have a task scheduled for 04:00 AM and found that it's executed 2151 times (every 5 seconds) from 07:00 АМ. to 10:00 АМ and then it stops. 
Time here is GMT+3 Also: python 2.7.3 django 1.4.1 (USE_TZ = True) virtualenv django-celery 3.0.6 celery 3.0.6 last_run_at is None Has anyone with similar problem? I have one. But in my case task runs every 5 minutes instead of run once a day. ``` Python3.4 celery==3.1.17 Django==1.8 ``` settings.py: ``` from celery.schedules import crontab USE_TZ = True TIME_ZONE = 'Europe/Moscow' CELERY_ENABLE_UTC = False CELERY_TIMEZONE = TIME_ZONE CELERYBEAT_SCHEDULE = { 'every-day': { 'task': 'app.tasks.every_day', 'schedule': crontab(minute=30, hour=0), }, } ``` Celery works under supervisord: ``` celery worker -A proj -l info --concurrency=2 -Ofair celery beat -A proj -l info ``` After 00:30 at midnight Celery sends task first time and then keep firing it until 03:30. I'm reopening this issue due to @monax's report. Can you please provide steps to reproduce? @thedrow Wait a couple of days, please. I'll try to make mini project to reproduce issue. Wonderful! thanks. https://github.com/monax/celeryissue In repo there is requirements.txt with all dependencies. As a broker I use rabbitmq. In settings.py there is a code which schedules a task to next minute after start. ``` from celery.schedules import crontab from datetime import datetime USE_TZ = True TIME_ZONE = 'Europe/Moscow' CELERY_ENABLE_UTC = False CELERY_TIMEZONE = TIME_ZONE t = datetime.today() ONE_MINUTE_AFTER = crontab(minute=t.minute + 1, hour=t.hour) CELERYBEAT_SCHEDULE = { 'every-day': { 'task': 'apptask.tasks.every_day', 'schedule': ONE_MINUTE_AFTER, }, } ``` Create a new virtualenv using pyvenv (python 3.4.3). Install requirements.txt and run in two terminals: ``` celery worker -A celeryissue -l info --concurrency=2 -Ofair celery beat -A celeryissue -l info ``` For beat you'll see: ``` celery beat -A celeryissue -l info celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ Configuration -> . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . 
scheduler -> celery.beat.PersistentScheduler . db -> celerybeat-schedule . logfile -> [stderr]@%INFO . maxinterval -> now (0s) [2015-06-15 12:35:50,911: INFO/MainProcess] beat: Starting... [2015-06-15 12:36:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-15 12:41:00,103: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-15 12:46:00,193: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) ``` For worker you'll see something like that: ``` [2015-06-15 12:36:00,009: INFO/MainProcess] Received task: apptask.tasks.every_day[c23dd76b-1181-4115-9379-d04e69879c49] [2015-06-15 12:36:00,013: INFO/Worker-2] Fire: 2015-06-15 12:36:00.013325 [2015-06-15 12:36:00,015: INFO/MainProcess] Task apptask.tasks.every_day[c23dd76b-1181-4115-9379-d04e69879c49] succeeded in 0.0025641939992055995s: None [2015-06-15 12:41:00,106: INFO/MainProcess] Received task: apptask.tasks.every_day[2d746b9e-4b83-421b-8cde-a75f6a537fc1] [2015-06-15 12:41:00,108: INFO/Worker-1] Fire: 2015-06-15 12:41:00.108269 [2015-06-15 12:41:00,110: INFO/MainProcess] Task apptask.tasks.every_day[2d746b9e-4b83-421b-8cde-a75f6a537fc1] succeeded in 0.0021385390009527327s: None [2015-06-15 12:46:00,196: INFO/MainProcess] Received task: apptask.tasks.every_day[8487f014-98f2-444a-91da-36621a5e7edc] [2015-06-15 12:46:00,198: INFO/Worker-2] Fire: 2015-06-15 12:46:00.197949 [2015-06-15 12:46:00,198: INFO/MainProcess] Task apptask.tasks.every_day[8487f014-98f2-444a-91da-36621a5e7edc] succeeded in 0.0014146600005915388s: None ``` I'll run it and let you know. I can reproduce it with `celery beat -A celeryissue -l debug --max-interval=5` ``` $ celery beat -A celeryissue -l debug --max-interval=5 celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ Configuration -> . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . scheduler -> celery.beat.PersistentScheduler . 
db -> celerybeat-schedule . logfile -> [stderr]@%DEBUG . maxinterval -> 5.00 seconds (5.0s) [2015-06-16 20:48:56,236: DEBUG/MainProcess] Setting default socket timeout to 30 [2015-06-16 20:48:56,236: INFO/MainProcess] beat: Starting... [2015-06-16 20:48:56,249: DEBUG/MainProcess] Current schedule: <Entry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)> <Entry: every-day apptask.tasks.every_day() <crontab: 49 20 * * * (m/h/d/dM/MY)> [2015-06-16 20:48:56,249: DEBUG/MainProcess] beat: Ticking with max interval->5.00 seconds [2015-06-16 20:48:56,257: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'capabilities': {'basic.nack': True, 'connection.blocked': True, 'publisher_confirms': True, 'per_consumer_qos': True, 'authentication_failure_close': True, 'consumer_priorities': True, 'consumer_cancel_notify': True, 'exchange_exchange_bindings': True}, 'version': '3.5.3', 'copyright': 'Copyright (C) 2007-2014 GoPivotal, Inc.', 'information': 'Licensed under the MPL. See http://www.rabbitmq.com/', 'cluster_name': 'rabbit@37cfe74621ef', 'platform': 'Erlang/OTP', 'product': 'RabbitMQ'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2015-06-16 20:48:56,258: DEBUG/MainProcess] Open OK! [2015-06-16 20:48:56,269: DEBUG/MainProcess] beat: Waking up in 3.72 seconds. [2015-06-16 20:49:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:00,001: DEBUG/MainProcess] using channel_id: 1 [2015-06-16 20:49:00,002: DEBUG/MainProcess] Channel open [2015-06-16 20:49:00,004: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 20:49:00,012: DEBUG/MainProcess] apptask.tasks.every_day sent. id->640586be-bca2-49b4-ab5e-5f70d1477fe7 [2015-06-16 20:49:00,012: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. 
[2015-06-16 20:49:05,008: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:05,008: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a415c0f0-a254-49b3-9272-754d7a3f283e [2015-06-16 20:49:05,008: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:10,004: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:10,005: DEBUG/MainProcess] apptask.tasks.every_day sent. id->3906d59d-2726-49b3-8f2e-fdf339945337 [2015-06-16 20:49:10,005: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:15,001: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:15,001: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8b525ba8-c569-4650-a209-859568ef62b7 [2015-06-16 20:49:15,001: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:19,997: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:19,998: DEBUG/MainProcess] apptask.tasks.every_day sent. id->02d2547b-71c8-4efc-9bb3-8daa89d6f65b [2015-06-16 20:49:19,998: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:24,993: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:24,994: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c29ef47e-d72e-4b95-a88d-fe9766229005 [2015-06-16 20:49:24,994: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:29,990: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:29,990: DEBUG/MainProcess] apptask.tasks.every_day sent. id->ba9f425c-623a-45de-9801-893e4b33f5ac [2015-06-16 20:49:29,991: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:34,986: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:34,987: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->ee62e647-12c3-40dd-9360-d6607f7497b2 [2015-06-16 20:49:34,987: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:39,983: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:39,984: DEBUG/MainProcess] apptask.tasks.every_day sent. id->45d9c85e-0463-45bb-9809-7475cf107186 [2015-06-16 20:49:39,984: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:44,979: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:44,980: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8229bf8b-67d6-4dc2-89a0-ffef5e7f3699 [2015-06-16 20:49:44,980: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:49,976: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:49,977: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a9c797f7-1c1b-4677-8fa6-135c6f036235 [2015-06-16 20:49:49,977: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:54,972: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:54,973: DEBUG/MainProcess] apptask.tasks.every_day sent. id->2215efa0-7a45-40cc-af68-00b0cac55f4a [2015-06-16 20:49:54,973: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:49:59,969: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:49:59,970: DEBUG/MainProcess] apptask.tasks.every_day sent. id->0bdeca69-b145-401c-9ef0-c7731c52ce6d [2015-06-16 20:49:59,970: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:04,965: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:04,966: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bad1b088-367a-4875-8594-272d3d2e6231 [2015-06-16 20:50:04,966: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. 
[2015-06-16 20:50:09,958: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:09,959: DEBUG/MainProcess] apptask.tasks.every_day sent. id->57d744d0-e5f1-4f29-9c7c-47e6af70a21d [2015-06-16 20:50:09,959: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:14,954: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:14,955: DEBUG/MainProcess] apptask.tasks.every_day sent. id->6f510b48-6127-479b-ac08-3e435e0691bb [2015-06-16 20:50:14,955: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:19,950: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:19,951: DEBUG/MainProcess] apptask.tasks.every_day sent. id->061bee73-3504-453e-90ee-7588b14f4147 [2015-06-16 20:50:19,951: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:24,946: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:24,947: DEBUG/MainProcess] apptask.tasks.every_day sent. id->eb744c6a-793b-4f9d-9d31-59c6559d3a5a [2015-06-16 20:50:24,947: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:29,942: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:29,942: DEBUG/MainProcess] apptask.tasks.every_day sent. id->9cf3ab91-1318-42e5-b577-059437487a4d [2015-06-16 20:50:29,943: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:34,938: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:34,939: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f92e235b-1796-4b4b-8d3a-d9e386dbbbe3 [2015-06-16 20:50:34,939: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:39,935: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:39,935: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->63f4cfb7-03d4-4e59-add7-adc7e2c9654a [2015-06-16 20:50:39,935: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:44,931: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:44,932: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a9c2f85f-a383-4e6d-883a-bddba0e615c8 [2015-06-16 20:50:44,932: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:49,927: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:49,928: DEBUG/MainProcess] apptask.tasks.every_day sent. id->aa95f5b9-ff84-448f-93ac-eea659b12711 [2015-06-16 20:50:49,928: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:54,924: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:54,925: DEBUG/MainProcess] apptask.tasks.every_day sent. id->0ccd4878-4490-4254-ae8c-a29b7e2ddbd3 [2015-06-16 20:50:54,925: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:50:59,920: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:50:59,921: DEBUG/MainProcess] apptask.tasks.every_day sent. id->d5d8b0fd-11be-41a0-88e0-8ff04dad96f3 [2015-06-16 20:50:59,921: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:04,917: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:04,918: DEBUG/MainProcess] apptask.tasks.every_day sent. id->90129b78-cb02-4056-a523-1eff080962c3 [2015-06-16 20:51:04,918: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:09,909: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:09,910: DEBUG/MainProcess] apptask.tasks.every_day sent. id->57c50ee1-a8d1-413c-90eb-b81e1d1eba29 [2015-06-16 20:51:09,910: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. 
[2015-06-16 20:51:14,904: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:14,905: DEBUG/MainProcess] apptask.tasks.every_day sent. id->ec28d1f9-dd75-409e-ae0d-4eb65912586e [2015-06-16 20:51:14,905: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:19,896: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:19,897: DEBUG/MainProcess] apptask.tasks.every_day sent. id->adcc46af-3d32-4a7a-ae57-66ff1957db85 [2015-06-16 20:51:19,897: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:24,893: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:24,893: DEBUG/MainProcess] apptask.tasks.every_day sent. id->39fbe804-115e-4719-a50c-f35ead13fb54 [2015-06-16 20:51:24,894: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:29,889: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:29,889: DEBUG/MainProcess] apptask.tasks.every_day sent. id->a918ed3d-d6b2-4b1c-aaff-f226d5f2c5c7 [2015-06-16 20:51:29,890: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:34,881: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:34,882: DEBUG/MainProcess] apptask.tasks.every_day sent. id->9062eb9c-ef0c-48f0-abe5-48841ad7631d [2015-06-16 20:51:34,882: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:39,877: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:39,878: DEBUG/MainProcess] apptask.tasks.every_day sent. id->00d184c9-2c9a-445e-baa9-c443b3145e3e [2015-06-16 20:51:39,878: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:44,871: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:44,872: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->2f66aff9-af61-4f4d-a981-f84620da054c [2015-06-16 20:51:44,872: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:49,868: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:49,869: DEBUG/MainProcess] apptask.tasks.every_day sent. id->2cd78f61-58f0-4803-a36a-4d6801463128 [2015-06-16 20:51:49,869: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:54,864: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:54,865: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f42bac9a-fef4-4e7a-a62b-b22fb7377550 [2015-06-16 20:51:54,865: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:51:59,856: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:51:59,857: DEBUG/MainProcess] apptask.tasks.every_day sent. id->11103d2c-92d2-41af-b0fa-69766e49a0d3 [2015-06-16 20:51:59,857: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:04,849: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:04,849: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 20:52:04,861: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bf70a0e7-b6c4-4a8b-beb9-9106e54cca4a [2015-06-16 20:52:04,861: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:09,857: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:09,858: DEBUG/MainProcess] apptask.tasks.every_day sent. id->f047b489-5680-4714-b20e-f64768045ee6 [2015-06-16 20:52:09,858: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:14,854: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:14,854: DEBUG/MainProcess] apptask.tasks.every_day sent. 
id->be6e557f-763f-4aca-a3bc-c4c5192c26bc [2015-06-16 20:52:14,855: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:19,850: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:19,852: DEBUG/MainProcess] apptask.tasks.every_day sent. id->78ffa59b-070f-40db-bb91-fd34748db4b8 [2015-06-16 20:52:19,852: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 20:52:24,845: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 20:52:24,846: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c74c558e-860a-4d6c-a46d-4e91d950c3f9 [2015-06-16 20:52:24,846: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. ``` Can you please run your beat instance with debug log level and tell me what the interval is? @ask There's something really wrong here. Can you please take a look? I edited this issue since I can reproduce it. ~~Something is wrong with `celery.schedules.crontab.remaining_delta()`. It returns that the task was executed a day ago even though it wasn't.~~ Sorry not correct. See below. There's one problem with with the time zone setting that isn't being picked up from Django's TIME_ZONE setting which may or may not be by design. Changing Celery's time zone to my local time zone causes the bug to be reproduced later: ``` /home/omer/.virtualenvs/celeryissue3/bin/python /home/omer/.virtualenvs/celeryissue3/bin/celery beat -A celeryissue -l debug --max-interval 5 celery beat v3.1.17 (Cipater) is starting. __ - ... __ - _ [2015-06-16 21:34:13,892: DEBUG/MainProcess] Setting default socket timeout to 30 Configuration -> [2015-06-16 21:34:13,892: INFO/MainProcess] beat: Starting... . broker -> amqp://guest:**@localhost:5672// . loader -> celery.loaders.app.AppLoader . scheduler -> celery.beat.PersistentScheduler . db -> celerybeat-schedule . logfile -> [stderr]@%DEBUG . 
maxinterval -> 5.00 seconds (5.0s) [2015-06-16 21:34:13,899: DEBUG/MainProcess] Current schedule: <Entry: every-day apptask.tasks.every_day() <crontab: 35 21 * * * (m/h/d/dM/MY)> <Entry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)> [2015-06-16 21:34:13,900: DEBUG/MainProcess] beat: Ticking with max interval->5.00 seconds [2015-06-16 21:34:13,908: DEBUG/MainProcess] Start from server, version: 0.9, properties: {'cluster_name': 'rabbit@37cfe74621ef', 'version': '3.5.3', 'capabilities': {'per_consumer_qos': True, 'basic.nack': True, 'consumer_priorities': True, 'publisher_confirms': True, 'consumer_cancel_notify': True, 'authentication_failure_close': True, 'exchange_exchange_bindings': True, 'connection.blocked': True}, 'copyright': 'Copyright (C) 2007-2014 GoPivotal, Inc.', 'product': 'RabbitMQ', 'platform': 'Erlang/OTP', 'information': 'Licensed under the MPL. See http://www.rabbitmq.com/'}, mechanisms: ['AMQPLAIN', 'PLAIN'], locales: ['en_US'] [2015-06-16 21:34:13,909: DEBUG/MainProcess] Open OK! [2015-06-16 21:34:13,920: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:18,912: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:23,904: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:28,896: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:33,892: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:38,884: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:43,880: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:48,875: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:53,872: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:34:58,864: DEBUG/MainProcess] beat: Waking up in 1.13 seconds. 
[2015-06-16 21:35:00,000: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:00,003: DEBUG/MainProcess] using channel_id: 1 [2015-06-16 21:35:00,005: DEBUG/MainProcess] Channel open [2015-06-16 21:35:00,009: DEBUG/MainProcess] beat: Synchronizing schedule... [2015-06-16 21:35:00,017: DEBUG/MainProcess] apptask.tasks.every_day sent. id->c5a94c8f-1981-4de7-b880-115de8248cb4 [2015-06-16 21:35:00,018: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:05,014: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:05,014: DEBUG/MainProcess] apptask.tasks.every_day sent. id->bfaba44b-cc10-49b3-8ba7-57b471f4ef7a [2015-06-16 21:35:05,015: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:10,010: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:10,011: DEBUG/MainProcess] apptask.tasks.every_day sent. id->6b07e184-f6fb-4fc5-927b-9e1d733bae8a [2015-06-16 21:35:10,012: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. [2015-06-16 21:35:15,005: INFO/MainProcess] Scheduler: Sending due task every-day (apptask.tasks.every_day) [2015-06-16 21:35:15,007: DEBUG/MainProcess] apptask.tasks.every_day sent. id->8865e9d3-ba83-4a4a-bbd3-ce0b7eb3389d [2015-06-16 21:35:15,007: DEBUG/MainProcess] beat: Waking up in 4.99 seconds. ``` There's definitely a bug somewhere in `celery.schedules.crontab.remaining_delta()` or surrounding it and it is related to timezones. I just have no idea what it is exactly. The bug does not seem to reproduce when I specify `CELERY_ENABLE_UTC=True` so there's might be a workaround for this bug. I'll keep the process running overnight to ensure that it doesn't reproduce if CELERY_ENABLE_UTC is set to true. @monax If you'd like to assist please set a breakpoint at that function and see how it's calculated. @thedrow I forgot one litle detail. 
If I try to run task 00.10 at midnight then task will be fire every 5 minutes until 03.10 after midnight. I'll run it with debug log level and let you know. Ok it's verified. The bug does not reproduce if CELERY_UTC_ENABLE is set to true. I'm going to try with master later tonight.
2015-06-19T00:01:16Z
<patch> diff --git a/celery/schedules.py b/celery/schedules.py --- a/celery/schedules.py +++ b/celery/schedules.py @@ -134,9 +134,7 @@ def is_due(self, last_run_at): return schedstate(is_due=False, next=remaining_s) def maybe_make_aware(self, dt): - if self.utc_enabled: - return maybe_make_aware(dt, self.tz) - return dt + return maybe_make_aware(dt, self.tz) def __repr__(self): return '<freq: {0.human_seconds}>'.format(self) </patch>
diff --git a/celery/tests/app/test_beat.py b/celery/tests/app/test_beat.py --- a/celery/tests/app/test_beat.py +++ b/celery/tests/app/test_beat.py @@ -521,7 +521,7 @@ def test_maybe_make_aware(self): self.assertTrue(d.tzinfo) x.utc_enabled = False d2 = x.maybe_make_aware(datetime.utcnow()) - self.assertIsNone(d2.tzinfo) + self.assertTrue(d2.tzinfo) def test_to_local(self): x = schedule(10, app=self.app)
1.0
NVIDIA__NeMo-5724
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> EMA Doesn't delete previous EMA ckpts when k > 0 for checkpointing **Describe the bug** EMA saves a separate EMA based ckpt when normally saving. The problem is when saving a set amount of checkpoints (i.e k=5) the older EMA ckpts are not being deleted. This means there is an excess of EMA based ckpts. What is expected is that older EMA ckpts are deleted. **Proposed Solution** Overriding the code is tricky, as the call to remove ckpts is found within functions, like such: https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L652 https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L671 https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L713 My suggestion would be to move this code into a separate function that can be overridden in the [NeMoModelCheckpoint](https://github.com/NVIDIA/NeMo/blob/4d3e99f986fd9354b5528a3e9c82fe01ec2df916/nemo/utils/exp_manager.py#L745) class by introducing a `delete_checkpoint` function. In this function we will also be able to delete the EMA weights. </issue> <code> [start of README.rst] 1 2 |status| |documentation| |license| |lgtm_grade| |lgtm_alerts| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 .. |documentation| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 9 :alt: Documentation 10 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 11 12 .. 
|license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 13 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 14 :alt: NeMo core license and license for collections in this repo 15 16 .. |lgtm_grade| image:: https://img.shields.io/lgtm/grade/python/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 17 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/context:python 18 :alt: Language grade: Python 19 20 .. |lgtm_alerts| image:: https://img.shields.io/lgtm/alerts/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 21 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/alerts/ 22 :alt: Total alerts 23 24 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 25 :target: https://github.com/psf/black 26 :alt: Code style: black 27 28 .. _main-readme: 29 30 **NVIDIA NeMo** 31 =============== 32 33 Introduction 34 ------------ 35 36 NVIDIA NeMo is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), 37 text-to-speech synthesis (TTS), large language models (LLMs), and 38 natural language processing (NLP). 39 The primary objective of NeMo is to help researchers from industry and academia to reuse prior work (code and pretrained models) 40 and make it easier to create new `conversational AI models <https://developer.nvidia.com/conversational-ai#started>`_. 41 42 All NeMo models are trained with `Lightning <https://github.com/Lightning-AI/lightning>`_ and 43 training is automatically scalable to 1000s of GPUs. 44 Additionally, NeMo Megatron LLM models can be trained up to 1 trillion parameters using tensor and pipeline model parallelism. 45 NeMo models can be optimized for inference and deployed for production use-cases with `NVIDIA Riva <https://developer.nvidia.com/riva>`_. 46 47 Getting started with NeMo is simple. 
48 State of the Art pretrained NeMo models are freely available on `HuggingFace Hub <https://huggingface.co/models?library=nemo&sort=downloads&search=nvidia>`_ and 49 `NVIDIA NGC <https://catalog.ngc.nvidia.com/models?query=nemo&orderBy=weightPopularDESC>`_. 50 These models can be used to transcribe audio, synthesize speech, or translate text in a just a few lines of code. 51 52 We have have extensive `tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_ that 53 can all be run on `Google Colab <https://colab.research.google.com>`_. 54 55 For advanced users that want to train NeMo models from scratch or finetune existing NeMo models 56 we have a full suite of `example scripts <https://github.com/NVIDIA/NeMo/tree/update_readme_into/examples>`_ that support multi-GPU/multi-node training. 57 58 Also see our `introductory video <https://www.youtube.com/embed/wBgpMf_KQVw>`_ for a high level overview of NeMo. 59 60 Key Features 61 ------------ 62 63 * Speech processing 64 * `HuggingFace Space for Audio Transcription (File, Micriphone and YouTube) <https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id>`_ 65 * `Automatic Speech Recognition (ASR) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html>`_ 66 * Supported models: Jasper, QuartzNet, CitriNet, Conformer-CTC, Conformer-Transducer, Squeezeformer-CTC, Squeezeformer-Transducer, ContextNet, LSTM-Transducer (RNNT), LSTM-CTC, ... 
67 * Supports CTC and Transducer/RNNT losses/decoders 68 * NeMo Original `Multi-blank Transducers <https://arxiv.org/abs/2211.03541>`_ 69 * Beam Search decoding 70 * `Language Modelling for ASR <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html>`_: N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer 71 * Streaming and Buffered ASR (CTC/Transducer) - `Chunked Inference Examples <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference>`_ 72 * `Support of long audios for Conformer with memory efficient local attention <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio>`_ 73 * `Speech Classification and Speech Command Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html>`_: MatchboxNet (Command Recognition) 74 * `Voice activity Detection (VAD) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad>`_: MarbleNet 75 * ASR with VAD Inference - `Example <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad>`_ 76 * `Speaker Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html>`_: TitaNet, ECAPA_TDNN, SpeakerNet 77 * `Speaker Diarization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html>`_ 78 * Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet 79 * Neural Diarizer: MSDD (Multi-scale Diarization Decoder) 80 * `Speech Intent Detection and Slot Filling <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html>`_: Conformer-Transformer 81 * `Pretrained models on different languages. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_: English, Spanish, German, Russian, Chinese, French, Italian, Polish, ... 
82 * `NGC collection of pre-trained speech processing models. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_ 83 * Natural Language Processing 84 * `NeMo Megatron pre-training of Large Language Models <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/nemo_megatron/intro.html>`_ 85 * `Neural Machine Translation (NMT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/machine_translation/machine_translation.html>`_ 86 * `Punctuation and Capitalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html>`_ 87 * `Token classification (named entity recognition) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/token_classification.html>`_ 88 * `Text classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_classification.html>`_ 89 * `Joint Intent and Slot Classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/joint_intent_slot.html>`_ 90 * `Question answering <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/question_answering.html>`_ 91 * `GLUE benchmark <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/glue_benchmark.html>`_ 92 * `Information retrieval <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/information_retrieval.html>`_ 93 * `Entity Linking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/entity_linking.html>`_ 94 * `Dialogue State Tracking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/sgd_qa.html>`_ 95 * `Prompt Learning <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html>`_ 96 * `NGC collection of pre-trained NLP models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_nlp>`_ 97 * `Synthetic Tabular Data Generation <https://developer.nvidia.com/blog/generating-synthetic-data-with-transformers-a-solution-for-enterprise-data-challenges/>`_ 98 * `Speech synthesis (TTS) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#>`_ 99 * Spectrogram generation: Tacotron2, GlowTTS, TalkNet, FastPitch, FastSpeech2, Mixer-TTS, Mixer-TTS-X 100 * Vocoders: WaveGlow, SqueezeWave, UniGlow, MelGAN, HiFiGAN, UnivNet 101 * End-to-end speech generation: FastPitch_HifiGan_E2E, FastSpeech2_HifiGan_E2E 102 * `NGC collection of pre-trained TTS models. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_tts>`_ 103 * `Tools <https://github.com/NVIDIA/NeMo/tree/stable/tools>`_ 104 * `Text Processing (text normalization and inverse text normalization) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html>`_ 105 * `CTC-Segmentation tool <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/ctc_segmentation.html>`_ 106 * `Speech Data Explorer <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/speech_data_explorer.html>`_: a dash-based tool for interactive exploration of ASR/TTS datasets 107 108 109 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 110 111 Requirements 112 ------------ 113 114 1) Python 3.8 or above 115 2) Pytorch 1.10.0 or above 116 3) NVIDIA GPU for training 117 118 Documentation 119 ------------- 120 121 .. |main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 122 :alt: Documentation Status 123 :scale: 100% 124 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 125 126 .. 
|stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 127 :alt: Documentation Status 128 :scale: 100% 129 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 130 131 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 132 | Version | Status | Description | 133 +=========+=============+==========================================================================================================================================+ 134 | Latest | |main| | `Documentation of the latest (i.e. main) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/>`_ | 135 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 136 | Stable | |stable| | `Documentation of the stable (i.e. most recent release) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/>`_ | 137 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 138 139 Tutorials 140 --------- 141 A great way to start with NeMo is by checking `one of our tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_. 142 143 Getting help with NeMo 144 ---------------------- 145 FAQ can be found on NeMo's `Discussions board <https://github.com/NVIDIA/NeMo/discussions>`_. You are welcome to ask questions or start discussions there. 146 147 148 Installation 149 ------------ 150 151 Conda 152 ~~~~~ 153 154 We recommend installing NeMo in a fresh Conda environment. 155 156 .. code-block:: bash 157 158 conda create --name nemo python==3.8 159 conda activate nemo 160 161 Install PyTorch using their `configurator <https://pytorch.org/get-started/locally/>`_. 
162 163 .. code-block:: bash 164 165 conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch 166 167 .. note:: 168 169 The command used to install PyTorch may depend on your system. 170 171 Pip 172 ~~~ 173 Use this installation mode if you want the latest released version. 174 175 .. code-block:: bash 176 177 apt-get update && apt-get install -y libsndfile1 ffmpeg 178 pip install Cython 179 pip install nemo_toolkit['all'] 180 181 .. note:: 182 183 Depending on the shell used, you may need to use ``"nemo_toolkit[all]"`` instead in the above command. 184 185 Pip from source 186 ~~~~~~~~~~~~~~~ 187 Use this installation mode if you want the a version from particular GitHub branch (e.g main). 188 189 .. code-block:: bash 190 191 apt-get update && apt-get install -y libsndfile1 ffmpeg 192 pip install Cython 193 python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all] 194 195 196 From source 197 ~~~~~~~~~~~ 198 Use this installation mode if you are contributing to NeMo. 199 200 .. code-block:: bash 201 202 apt-get update && apt-get install -y libsndfile1 ffmpeg 203 git clone https://github.com/NVIDIA/NeMo 204 cd NeMo 205 ./reinstall.sh 206 207 .. note:: 208 209 If you only want the toolkit without additional conda-based dependencies, you may replace ``reinstall.sh`` 210 with ``pip install -e .`` when your PWD is the root of the NeMo repository. 211 212 RNNT 213 ~~~~ 214 Note that RNNT requires numba to be installed from conda. 215 216 .. code-block:: bash 217 218 conda remove numba 219 pip uninstall numba 220 conda install -c conda-forge numba 221 222 NeMo Megatron 223 ~~~~~~~~~~~~~ 224 NeMo Megatron training requires NVIDIA Apex to be installed. 225 Install it manually if not using the NVIDIA PyTorch container. 226 227 .. 
code-block:: bash 228 229 git clone https://github.com/ericharper/apex.git 230 cd apex 231 git checkout nm_v1.14.0 232 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_layer_norm" --global-option="--distributed_adam" --global-option="--deprecated_fused_adam" ./ 233 234 Transformer Engine 235 ~~~~~~~~~~~~~~~~~~ 236 NeMo Megatron GPT has been integrated with `NVIDIA Transformer Engine <https://github.com/NVIDIA/TransformerEngine>`_ 237 Transformer Engine enables FP8 training on NVIDIA Hopper GPUs. 238 `Install <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/installation.html>`_ it manually if not using the NVIDIA PyTorch container. 239 240 .. note:: 241 242 Transformer Engine requires PyTorch to be built with CUDA 11.8. 243 244 NeMo Text Processing 245 ~~~~~~~~~~~~~~~~~~~~ 246 NeMo Text Processing, specifically (Inverse) Text Normalization, requires `Pynini <https://pypi.org/project/pynini/>`_ to be installed. 247 248 .. code-block:: bash 249 250 bash NeMo/nemo_text_processing/install_pynini.sh 251 252 Docker containers: 253 ~~~~~~~~~~~~~~~~~~ 254 To build a nemo container with Dockerfile from a branch, please run 255 256 .. code-block:: bash 257 258 DOCKER_BUILDKIT=1 docker build -f Dockerfile -t nemo:latest . 259 260 261 If you chose to work with main branch, we recommend using NVIDIA's PyTorch container version 22.12-py3 and then installing from GitHub. 262 263 .. code-block:: bash 264 265 docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 266 -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 267 stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:22.12-py3 268 269 Examples 270 -------- 271 272 Many examples can be found under `"Examples" <https://github.com/NVIDIA/NeMo/tree/stable/examples>`_ folder. 273 274 275 Contributing 276 ------------ 277 278 We welcome community contributions! 
Please refer to the `CONTRIBUTING.md <https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md>`_ CONTRIBUTING.md for the process. 279 280 Publications 281 ------------ 282 283 We provide an ever growing list of publications that utilize the NeMo framework. Please refer to `PUBLICATIONS.md <https://github.com/NVIDIA/NeMo/tree/stable/PUBLICATIONS.md>`_. We welcome the addition of your own articles to this list ! 284 285 Citation 286 -------- 287 288 .. code-block:: bash 289 290 @article{kuchaiev2019nemo, 291 title={Nemo: a toolkit for building ai applications using neural modules}, 292 author={Kuchaiev, Oleksii and Li, Jason and Nguyen, Huyen and Hrinchuk, Oleksii and Leary, Ryan and Ginsburg, Boris and Kriman, Samuel and Beliaev, Stanislav and Lavrukhin, Vitaly and Cook, Jack and others}, 293 journal={arXiv preprint arXiv:1909.09577}, 294 year={2019} 295 } 296 297 License 298 ------- 299 NeMo is under `Apache 2.0 license <https://github.com/NVIDIA/NeMo/blob/stable/LICENSE>`_. 300 [end of README.rst] [start of nemo/collections/common/callbacks/ema.py] 1 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 import contextlib 15 import copy 16 import logging 17 import os 18 import threading 19 from typing import Any, Dict, Iterable 20 21 import pytorch_lightning as pl 22 import torch 23 from pytorch_lightning import Callback 24 from pytorch_lightning.utilities.exceptions import MisconfigurationException 25 26 27 class EMA(Callback): 28 """ 29 Implements Exponential Moving Averaging (EMA). 30 31 When training a model, this callback will maintain moving averages of the trained parameters. 32 When evaluating, we use the moving averages copy of the trained parameters. 33 When saving, we save an additional set of parameters with the prefix `ema`. 34 35 Args: 36 decay: The exponential decay used when calculating the moving average. Has to be between 0-1. 37 validate_original_weights: Validate the original weights, as apposed to the EMA weights. 38 every_n_steps: Apply EMA every N steps. 39 cpu_offload: Offload weights to CPU. 40 """ 41 42 def __init__( 43 self, decay: float, validate_original_weights: bool = False, every_n_steps: int = 1, cpu_offload: bool = False, 44 ): 45 if not (0 <= decay <= 1): 46 raise MisconfigurationException("EMA decay value must be between 0 and 1") 47 self.decay = decay 48 self.validate_original_weights = validate_original_weights 49 self.every_n_steps = every_n_steps 50 self.cpu_offload = cpu_offload 51 52 def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: 53 device = pl_module.device if not self.cpu_offload else torch.device('cpu') 54 trainer.optimizers = [ 55 EMAOptimizer( 56 optim, 57 device=device, 58 decay=self.decay, 59 every_n_steps=self.every_n_steps, 60 current_step=trainer.global_step, 61 ) 62 for optim in trainer.optimizers 63 if not isinstance(optim, EMAOptimizer) 64 ] 65 66 def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: 67 if self._should_validate_ema_weights(trainer): 68 self.swap_model_weights(trainer) 69 70 def on_validation_end(self, trainer: 
"pl.Trainer", pl_module: "pl.LightningModule") -> None: 71 if self._should_validate_ema_weights(trainer): 72 self.swap_model_weights(trainer) 73 74 def on_test_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: 75 if self._should_validate_ema_weights(trainer): 76 self.swap_model_weights(trainer) 77 78 def on_test_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None: 79 if self._should_validate_ema_weights(trainer): 80 self.swap_model_weights(trainer) 81 82 def _should_validate_ema_weights(self, trainer: "pl.Trainer") -> bool: 83 return not self.validate_original_weights and self._ema_initialized(trainer) 84 85 def _ema_initialized(self, trainer: "pl.Trainer") -> bool: 86 return any(isinstance(optimizer, EMAOptimizer) for optimizer in trainer.optimizers) 87 88 def swap_model_weights(self, trainer: "pl.Trainer", saving_ema_model: bool = False): 89 for optimizer in trainer.optimizers: 90 assert isinstance(optimizer, EMAOptimizer) 91 optimizer.switch_main_parameter_weights(saving_ema_model) 92 93 @contextlib.contextmanager 94 def save_ema_model(self, trainer: "pl.Trainer"): 95 """ 96 Saves an EMA copy of the model + EMA optimizer states for resume. 
97 """ 98 self.swap_model_weights(trainer, saving_ema_model=True) 99 try: 100 yield 101 finally: 102 self.swap_model_weights(trainer, saving_ema_model=False) 103 104 @contextlib.contextmanager 105 def save_original_optimizer_state(self, trainer: "pl.Trainer"): 106 for optimizer in trainer.optimizers: 107 assert isinstance(optimizer, EMAOptimizer) 108 optimizer.save_original_optimizer_state = True 109 try: 110 yield 111 finally: 112 for optimizer in trainer.optimizers: 113 optimizer.save_original_optimizer_state = False 114 115 def on_load_checkpoint( 116 self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", checkpoint: Dict[str, Any] 117 ) -> None: 118 checkpoint_callback = trainer.checkpoint_callback 119 120 if trainer.ckpt_path and checkpoint_callback is not None and 'NeMo' in type(checkpoint_callback).__name__: 121 ext = checkpoint_callback.FILE_EXTENSION 122 if trainer.ckpt_path.endswith(f'-EMA{ext}'): 123 logging.info( 124 "loading EMA based weights. " 125 "The callback will treat the loaded EMA weights as the main weights" 126 " and create a new EMA copy when training." 127 ) 128 return 129 ema_path = trainer.ckpt_path.replace(ext, f'-EMA{ext}') 130 if os.path.exists(ema_path): 131 ema_state_dict = torch.load(ema_path, map_location=torch.device('cpu')) 132 133 # this is wrong, basically when we save the EMA weights, optimizer_states actually contains the model parameters 134 # as we swapped the model parameters with the state dict parameters. 135 # we could enforce that if you trained with EMA and want to continue training 136 checkpoint['optimizer_states'] = ema_state_dict['optimizer_states'] 137 del ema_state_dict 138 logging.info("EMA state has been restored.") 139 else: 140 raise MisconfigurationException( 141 "Unable to find the associated EMA weights when re-loading, " 142 f"training will start with new EMA weights. 
Expected them to be at: {ema_path}", 143 ) 144 145 146 @torch.no_grad() 147 def ema_update(ema_model_tuple, current_model_tuple, decay): 148 torch._foreach_mul_(ema_model_tuple, decay) 149 torch._foreach_add_( 150 ema_model_tuple, current_model_tuple, alpha=(1.0 - decay), 151 ) 152 153 154 def run_ema_update_cpu(ema_model_tuple, current_model_tuple, decay, pre_sync_stream=None): 155 if pre_sync_stream is not None: 156 pre_sync_stream.synchronize() 157 158 ema_update(ema_model_tuple, current_model_tuple, decay) 159 160 161 class EMAOptimizer(torch.optim.Optimizer): 162 r""" 163 EMAOptimizer is a wrapper for torch.optim.Optimizer that computes 164 Exponential Moving Average of parameters registered in the optimizer. 165 166 EMA parameters are automatically updated after every step of the optimizer 167 with the following formula: 168 169 ema_weight = decay * ema_weight + (1 - decay) * training_weight 170 171 To access EMA parameters, use ``swap_ema_weights()`` context manager to 172 perform a temporary in-place swap of regular parameters with EMA 173 parameters. 174 175 Notes: 176 - EMAOptimizer is not compatible with APEX AMP O2. 
177 178 Args: 179 optimizer (torch.optim.Optimizer): optimizer to wrap 180 device (torch.device): device for EMA parameters 181 decay (float): decay factor 182 183 Returns: 184 returns an instance of torch.optim.Optimizer that computes EMA of 185 parameters 186 187 Example: 188 model = Model().to(device) 189 opt = torch.optim.Adam(model.parameters()) 190 191 opt = EMAOptimizer(opt, device, 0.9999) 192 193 for epoch in range(epochs): 194 training_loop(model, opt) 195 196 regular_eval_accuracy = evaluate(model) 197 198 with opt.swap_ema_weights(): 199 ema_eval_accuracy = evaluate(model) 200 """ 201 202 def __init__( 203 self, 204 optimizer: torch.optim.Optimizer, 205 device: torch.device, 206 decay: float = 0.9999, 207 every_n_steps: int = 1, 208 current_step: int = 0, 209 ): 210 self.optimizer = optimizer 211 self.decay = decay 212 self.device = device 213 self.current_step = current_step 214 self.every_n_steps = every_n_steps 215 self.save_original_optimizer_state = False 216 217 self.first_iteration = True 218 self.rebuild_ema_params = True 219 self.stream = None 220 self.thread = None 221 222 self.ema_params = () 223 self.in_saving_ema_model_context = False 224 225 def all_parameters(self) -> Iterable[torch.Tensor]: 226 return (param for group in self.param_groups for param in group['params']) 227 228 def step(self, closure=None, **kwargs): 229 self.join() 230 231 if self.first_iteration: 232 if any(p.is_cuda for p in self.all_parameters()): 233 self.stream = torch.cuda.Stream() 234 235 self.first_iteration = False 236 237 if self.rebuild_ema_params: 238 opt_params = list(self.all_parameters()) 239 240 self.ema_params += tuple( 241 copy.deepcopy(param.data.detach()).to(self.device) for param in opt_params[len(self.ema_params) :] 242 ) 243 self.rebuild_ema_params = False 244 245 loss = self.optimizer.step(closure) 246 247 if self._should_update_at_step(): 248 self.update() 249 self.current_step += 1 250 return loss 251 252 def _should_update_at_step(self) -> bool: 
253 return self.current_step % self.every_n_steps == 0 254 255 @torch.no_grad() 256 def update(self): 257 if self.stream is not None: 258 self.stream.wait_stream(torch.cuda.current_stream()) 259 260 with torch.cuda.stream(self.stream): 261 current_model_state = tuple( 262 param.data.to(self.device, non_blocking=True) for param in self.all_parameters() 263 ) 264 265 if self.device.type == 'cuda': 266 ema_update(self.ema_params, current_model_state, self.decay) 267 268 if self.device.type == 'cpu': 269 self.thread = threading.Thread( 270 target=run_ema_update_cpu, args=(self.ema_params, current_model_state, self.decay, self.stream,), 271 ) 272 self.thread.start() 273 274 def swap_tensors(self, tensor1, tensor2): 275 tmp = torch.empty_like(tensor1) 276 tmp.copy_(tensor1) 277 tensor1.copy_(tensor2) 278 tensor2.copy_(tmp) 279 280 def switch_main_parameter_weights(self, saving_ema_model: bool = False): 281 self.join() 282 self.in_saving_ema_model_context = saving_ema_model 283 for param, ema_param in zip(self.all_parameters(), self.ema_params): 284 self.swap_tensors(param.data, ema_param) 285 286 @contextlib.contextmanager 287 def swap_ema_weights(self, enabled: bool = True): 288 r""" 289 A context manager to in-place swap regular parameters with EMA 290 parameters. 291 It swaps back to the original regular parameters on context manager 292 exit. 
293 294 Args: 295 enabled (bool): whether the swap should be performed 296 """ 297 298 if enabled: 299 self.switch_main_parameter_weights() 300 try: 301 yield 302 finally: 303 if enabled: 304 self.switch_main_parameter_weights() 305 306 def __getattr__(self, name): 307 return getattr(self.optimizer, name) 308 309 def join(self): 310 if self.stream is not None: 311 self.stream.synchronize() 312 313 if self.thread is not None: 314 self.thread.join() 315 316 def state_dict(self): 317 self.join() 318 319 if self.save_original_optimizer_state: 320 return self.optimizer.state_dict() 321 322 # if we are in the context of saving an EMA model, the EMA weights are in the modules' actual weights 323 ema_params = self.ema_params if not self.in_saving_ema_model_context else list(self.all_parameters()) 324 state_dict = { 325 'opt': self.optimizer.state_dict(), 326 'ema': ema_params, 327 'current_step': self.current_step, 328 'decay': self.decay, 329 'every_n_steps': self.every_n_steps, 330 } 331 return state_dict 332 333 def load_state_dict(self, state_dict): 334 self.join() 335 336 self.optimizer.load_state_dict(state_dict['opt']) 337 self.ema_params = tuple(param.to(self.device) for param in copy.deepcopy(state_dict['ema'])) 338 self.current_step = state_dict['current_step'] 339 self.decay = state_dict['decay'] 340 self.every_n_steps = state_dict['every_n_steps'] 341 self.rebuild_ema_params = False 342 343 def add_param_group(self, param_group): 344 self.optimizer.add_param_group(param_group) 345 self.rebuild_ema_params = True 346 [end of nemo/collections/common/callbacks/ema.py] [start of nemo/utils/exp_manager.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import glob 16 import os 17 import re 18 import subprocess 19 import sys 20 import time 21 import warnings 22 from copy import deepcopy 23 from dataclasses import dataclass 24 from datetime import timedelta 25 from pathlib import Path 26 from shutil import copy, move 27 from typing import Any, Dict, List, Optional, Tuple, Union 28 29 import pytorch_lightning 30 import torch 31 from hydra.core.hydra_config import HydraConfig 32 from hydra.utils import get_original_cwd 33 from omegaconf import DictConfig, OmegaConf, open_dict 34 from pytorch_lightning.callbacks import Callback, ModelCheckpoint 35 from pytorch_lightning.callbacks.timer import Interval, Timer 36 from pytorch_lightning.loggers import MLFlowLogger, TensorBoardLogger, WandbLogger 37 from pytorch_lightning.loops import TrainingEpochLoop 38 from pytorch_lightning.strategies.ddp import DDPStrategy 39 from pytorch_lightning.utilities import rank_zero_info 40 41 from nemo.collections.common.callbacks import EMA 42 from nemo.constants import NEMO_ENV_VARNAME_TESTING, NEMO_ENV_VARNAME_VERSION 43 from nemo.utils import logging, timers 44 from nemo.utils.app_state import AppState 45 from nemo.utils.dllogger import DLLogger 46 from nemo.utils.env_var_parsing import get_envbool 47 from nemo.utils.exceptions import NeMoBaseException 48 from nemo.utils.get_rank import is_global_rank_zero 49 from nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger 50 from nemo.utils.model_utils import inject_model_parallel_rank, uninject_model_parallel_rank 51 52 53 class 
NotFoundError(NeMoBaseException): 54 """ Raised when a file or folder is not found""" 55 56 57 class LoggerMisconfigurationError(NeMoBaseException): 58 """ Raised when a mismatch between trainer.logger and exp_manager occurs""" 59 60 def __init__(self, message): 61 message = ( 62 message 63 + " You can disable lighning's trainer from creating a logger by passing logger=False to its constructor." 64 ) 65 super().__init__(message) 66 67 68 class CheckpointMisconfigurationError(NeMoBaseException): 69 """ Raised when a mismatch between trainer.callbacks and exp_manager occurs""" 70 71 72 @dataclass 73 class CallbackParams: 74 filepath: Optional[str] = None # Deprecated 75 dirpath: Optional[str] = None # If None, exp_manager will attempt to handle the filepath 76 filename: Optional[str] = None # If None, exp_manager will attempt to handle the filepath 77 monitor: Optional[str] = "val_loss" 78 verbose: Optional[bool] = True 79 save_last: Optional[bool] = True 80 save_top_k: Optional[int] = 3 81 save_weights_only: Optional[bool] = False 82 mode: Optional[str] = "min" 83 every_n_epochs: Optional[int] = 1 84 prefix: Optional[str] = None # If None, exp_manager will attempt to handle the filepath 85 postfix: str = ".nemo" 86 save_best_model: bool = False 87 always_save_nemo: bool = False 88 save_nemo_on_train_end: Optional[bool] = True # Whether to automatically save .nemo file durin on_train_end hook 89 model_parallel_size: Optional[int] = None # tensor parallel size * pipeline parallel size 90 91 92 @dataclass 93 class MLFlowParams: 94 # name of experiment, if none, defaults to the globally set experiment name 95 experiment_name: Optional[str] = None 96 # no run_name because it's set by version 97 # local or remote tracking seerver. 
If tracking_uri is not set, it defaults to save_dir 98 tracking_uri: Optional[str] = None 99 tags: Optional[Dict[str, Any]] = None 100 save_dir: Optional[str] = "./mlruns" 101 prefix: str = "" 102 artifact_location: Optional[str] = None 103 # provide run_id if resuming a previously started run 104 run_id: Optional[str] = None 105 106 107 @dataclass 108 class DLLoggerParams: 109 verbose: Optional[bool] = False 110 stdout: Optional[bool] = False 111 json_file: Optional[str] = "./dllogger.json" 112 113 114 @dataclass 115 class StepTimingParams: 116 reduction: Optional[str] = "mean" 117 # if True torch.cuda.synchronize() is called on start/stop 118 sync_cuda: Optional[bool] = False 119 # if positive, defines the size of a sliding window for computing mean 120 buffer_size: Optional[int] = 1 121 122 123 @dataclass 124 class EMAParams: 125 enable: Optional[bool] = False 126 decay: Optional[float] = 0.999 127 cpu_offload: Optional[bool] = False 128 validate_original_weights: Optional[bool] = False 129 every_n_steps: int = 1 130 131 132 @dataclass 133 class ExpManagerConfig: 134 # Log dir creation parameters 135 explicit_log_dir: Optional[str] = None 136 exp_dir: Optional[str] = None 137 name: Optional[str] = None 138 version: Optional[str] = None 139 use_datetime_version: Optional[bool] = True 140 resume_if_exists: Optional[bool] = False 141 resume_past_end: Optional[bool] = False 142 resume_ignore_no_checkpoint: Optional[bool] = False 143 # Logging parameters 144 create_tensorboard_logger: Optional[bool] = True 145 summary_writer_kwargs: Optional[Dict[Any, Any]] = None 146 create_wandb_logger: Optional[bool] = False 147 wandb_logger_kwargs: Optional[Dict[Any, Any]] = None 148 create_mlflow_logger: Optional[bool] = False 149 mlflow_logger_kwargs: Optional[MLFlowParams] = MLFlowParams() 150 create_dllogger_logger: Optional[bool] = False 151 dllogger_logger_kwargs: Optional[DLLoggerParams] = DLLoggerParams() 152 # Checkpointing parameters 153 create_checkpoint_callback: 
Optional[bool] = True 154 checkpoint_callback_params: Optional[CallbackParams] = CallbackParams() 155 # Additional exp_manager arguments 156 files_to_copy: Optional[List[str]] = None 157 # logs timing of train/val/test steps 158 log_step_timing: Optional[bool] = True 159 step_timing_kwargs: Optional[StepTimingParams] = StepTimingParams() 160 # Configures creation of log files for different ranks 161 log_local_rank_0_only: Optional[bool] = False 162 log_global_rank_0_only: Optional[bool] = False 163 # disable initial validation when resuming from a checkpoint saved during validation 164 disable_validation_on_resume: Optional[bool] = True 165 ema: Optional[EMAParams] = EMAParams() 166 # Wall clock time limit 167 max_time_per_run: Optional[str] = None 168 169 170 class TimingCallback(Callback): 171 """ 172 Logs execution time of train/val/test steps 173 """ 174 175 def __init__(self, timer_kwargs={}): 176 self.timer = timers.NamedTimer(**timer_kwargs) 177 178 def _on_batch_start(self, name): 179 # reset only if we do not return mean of a sliding window 180 if self.timer.buffer_size <= 0: 181 self.timer.reset(name) 182 183 self.timer.start(name) 184 185 def _on_batch_end(self, name, pl_module): 186 self.timer.stop(name) 187 pl_module.log(name, self.timer[name], on_step=True, on_epoch=False) 188 189 def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): 190 self._on_batch_start("train_step_timing") 191 192 def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): 193 self._on_batch_end("train_step_timing", pl_module) 194 195 def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): 196 self._on_batch_start("validation_step_timing") 197 198 def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): 199 self._on_batch_end("validation_step_timing", pl_module) 200 201 def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): 202 
self._on_batch_start("test_step_timing") 203 204 def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): 205 self._on_batch_end("test_step_timing", pl_module) 206 207 def on_before_backward(self, trainer, pl_module, loss): 208 self._on_batch_start("train_backward_timing") 209 210 def on_after_backward(self, trainer, pl_module): 211 self._on_batch_end("train_backward_timing", pl_module) 212 213 214 def exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None) -> Optional[Path]: 215 """ 216 exp_manager is a helper function used to manage folders for experiments. It follows the pytorch lightning paradigm 217 of exp_dir/model_or_experiment_name/version. If the lightning trainer has a logger, exp_manager will get exp_dir, 218 name, and version from the logger. Otherwise it will use the exp_dir and name arguments to create the logging 219 directory. exp_manager also allows for explicit folder creation via explicit_log_dir. 220 221 The version can be a datetime string or an integer. Datestime version can be disabled if use_datetime_version is set 222 to False. It optionally creates TensorBoardLogger, WandBLogger, DLLogger, MLFlowLogger, ModelCheckpoint objects from pytorch lightning. 223 It copies sys.argv, and git information if available to the logging directory. It creates a log file for each 224 process to log their output into. 225 226 exp_manager additionally has a resume feature (resume_if_exists) which can be used to continuing training from 227 the constructed log_dir. When you need to continue the training repeatedly (like on a cluster which you need 228 multiple consecutive jobs), you need to avoid creating the version folders. Therefore from v1.0.0, when 229 resume_if_exists is set to True, creating the version folders is ignored. 230 231 Args: 232 trainer (pytorch_lightning.Trainer): The lightning trainer. 
233 cfg (DictConfig, dict): Can have the following keys: 234 235 - explicit_log_dir (str, Path): Can be used to override exp_dir/name/version folder creation. Defaults to 236 None, which will use exp_dir, name, and version to construct the logging directory. 237 - exp_dir (str, Path): The base directory to create the logging directory. Defaults to None, which logs to 238 ./nemo_experiments. 239 - name (str): The name of the experiment. Defaults to None which turns into "default" via name = name or 240 "default". 241 - version (str): The version of the experiment. Defaults to None which uses either a datetime string or 242 lightning's TensorboardLogger system of using version_{int}. 243 - use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True. 244 - resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets 245 trainer._checkpoint_connector.resume_from_checkpoint_fit_path so that the trainer should auto-resume. exp_manager will move files 246 under log_dir to log_dir/run_{int}. Defaults to False. From v1.0.0, when resume_if_exists is True, 247 we would not create version folders to make it easier to find the log folder for next runs. 248 - resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching 249 ``*end.ckpt`` indicating a previous training run fully completed. This behaviour can be disabled, in which 250 case the ``*end.ckpt`` will be loaded by setting resume_past_end to True. Defaults to False. 251 - resume_ignore_no_checkpoint (bool): exp_manager errors out if resume_if_exists is True and no checkpoint 252 could be found. This behaviour can be disabled, in which case exp_manager will print a message and 253 continue without restoring, by setting resume_ignore_no_checkpoint to True. Defaults to False. 254 - create_tensorboard_logger (bool): Whether to create a tensorboard logger and attach it to the pytorch 255 lightning trainer. 
Defaults to True. 256 - summary_writer_kwargs (dict): A dictionary of kwargs that can be passed to lightning's TensorboardLogger 257 class. Note that log_dir is passed by exp_manager and cannot exist in this dict. Defaults to None. 258 - create_wandb_logger (bool): Whether to create a Weights and Baises logger and attach it to the pytorch 259 lightning trainer. Defaults to False. 260 - wandb_logger_kwargs (dict): A dictionary of kwargs that can be passed to lightning's WandBLogger 261 class. Note that name and project are required parameters if create_wandb_logger is True. 262 Defaults to None. 263 - create_mlflow_logger (bool): Whether to create an MLFlow logger and attach it to the pytorch lightning 264 training. Defaults to False 265 - mlflow_logger_kwargs (dict): optional parameters for the MLFlow logger 266 - create_dllogger_logger (bool): Whether to create an DLLogger logger and attach it to the pytorch lightning 267 training. Defaults to False 268 - dllogger_logger_kwargs (dict): optional parameters for the DLLogger logger 269 - create_checkpoint_callback (bool): Whether to create a ModelCheckpoint callback and attach it to the 270 pytorch lightning trainer. The ModelCheckpoint saves the top 3 models with the best "val_loss", the most 271 recent checkpoint under ``*last.ckpt``, and the final checkpoint after training completes under ``*end.ckpt``. 272 Defaults to True. 273 - files_to_copy (list): A list of files to copy to the experiment logging directory. Defaults to None which 274 copies no files. 275 - log_local_rank_0_only (bool): Whether to only create log files for local rank 0. Defaults to False. 276 Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir. 277 - log_global_rank_0_only (bool): Whether to only create log files for global rank 0. Defaults to False. 278 Set this to True if you are using DDP with many GPUs and do not want many log files in your exp dir. 
279 - max_time (str): The maximum wall clock time *per run*. This is intended to be used on clusters where you want 280 a checkpoint to be saved after this specified time and be able to resume from that checkpoint. Defaults to None. 281 282 returns: 283 log_dir (Path): The final logging directory where logging files are saved. Usually the concatenation of 284 exp_dir, name, and version. 285 """ 286 # Add rank information to logger 287 # Note: trainer.global_rank and trainer.is_global_zero are not set until trainer.fit, so have to hack around it 288 local_rank = int(os.environ.get("LOCAL_RANK", 0)) 289 global_rank = trainer.node_rank * trainer.num_devices + local_rank 290 logging.rank = global_rank 291 292 if cfg is None: 293 logging.error("exp_manager did not receive a cfg argument. It will be disabled.") 294 return 295 if trainer.fast_dev_run: 296 logging.info("Trainer was called with fast_dev_run. exp_manager will return without any functionality.") 297 return 298 299 # Ensure passed cfg is compliant with ExpManagerConfig 300 schema = OmegaConf.structured(ExpManagerConfig) 301 if isinstance(cfg, dict): 302 cfg = OmegaConf.create(cfg) 303 elif not isinstance(cfg, DictConfig): 304 raise ValueError(f"cfg was type: {type(cfg)}. 
Expected either a dict or a DictConfig") 305 cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True)) 306 cfg = OmegaConf.merge(schema, cfg) 307 308 error_checks(trainer, cfg) # Ensures that trainer options are compliant with NeMo and exp_manager arguments 309 310 log_dir, exp_dir, name, version = get_log_dir( 311 trainer=trainer, 312 exp_dir=cfg.exp_dir, 313 name=cfg.name, 314 version=cfg.version, 315 explicit_log_dir=cfg.explicit_log_dir, 316 use_datetime_version=cfg.use_datetime_version, 317 resume_if_exists=cfg.resume_if_exists, 318 ) 319 320 if cfg.resume_if_exists: 321 # Check for existing checkpoints in `dirpath` if it's specified, use <log_dir>/checkpoints otherwise 322 if cfg.checkpoint_callback_params.dirpath: 323 check_resume( 324 trainer, 325 log_dir, 326 cfg.resume_past_end, 327 cfg.resume_ignore_no_checkpoint, 328 cfg.checkpoint_callback_params.dirpath, 329 ) 330 else: 331 check_resume(trainer, log_dir, cfg.resume_past_end, cfg.resume_ignore_no_checkpoint) 332 333 checkpoint_name = name 334 # If name returned from get_log_dir is "", use cfg.name for checkpointing 335 if checkpoint_name is None or checkpoint_name == '': 336 checkpoint_name = cfg.name or "default" 337 338 # Set mlflow name if it's not set, before the main name is erased 339 if cfg.create_mlflow_logger and (not cfg.mlflow_logger_kwargs.get("experiment_name", None)): 340 cfg.mlflow_logger_kwargs.experiment_name = cfg.name 341 logging.warning( 342 'mlflow logger specified but no experiment name set. 
Using the same as Tensorboard: %s', 343 cfg.mlflow_logger_kwargs.experiment_name, 344 ) 345 346 cfg.name = name # Used for configure_loggers so that the log_dir is properly set even if name is "" 347 cfg.version = version 348 349 # update app_state with log_dir, exp_dir, etc 350 app_state = AppState() 351 app_state.log_dir = log_dir 352 app_state.exp_dir = exp_dir 353 app_state.name = name 354 app_state.version = version 355 app_state.checkpoint_name = checkpoint_name 356 app_state.create_checkpoint_callback = cfg.create_checkpoint_callback 357 app_state.checkpoint_callback_params = cfg.checkpoint_callback_params 358 359 # Create the logging directory if it does not exist 360 os.makedirs(log_dir, exist_ok=True) # Cannot limit creation to global zero as all ranks write to own log file 361 logging.info(f'Experiments will be logged at {log_dir}') 362 trainer._default_root_dir = log_dir 363 364 if cfg.log_local_rank_0_only is True and cfg.log_global_rank_0_only is True: 365 raise ValueError( 366 f"Cannot set both log_local_rank_0_only and log_global_rank_0_only to True. Please set either one or neither." 367 ) 368 369 # This is set if the env var NEMO_TESTING is set to True. 370 nemo_testing = get_envbool(NEMO_ENV_VARNAME_TESTING, False) 371 372 # Handle logging to file 373 log_file = log_dir / f'nemo_log_globalrank-{global_rank}_localrank-{local_rank}.txt' 374 if cfg.log_local_rank_0_only is True and not nemo_testing: 375 if local_rank == 0: 376 logging.add_file_handler(log_file) 377 elif cfg.log_global_rank_0_only is True and not nemo_testing: 378 if global_rank == 0: 379 logging.add_file_handler(log_file) 380 else: 381 # Logs on all ranks. 382 logging.add_file_handler(log_file) 383 384 # For some reason, LearningRateLogger requires trainer to have a logger. Safer to create logger on all ranks 385 # not just global rank 0. 
386 if ( 387 cfg.create_tensorboard_logger 388 or cfg.create_wandb_logger 389 or cfg.create_mlflow_logger 390 or cfg.create_dllogger_logger 391 ): 392 configure_loggers( 393 trainer, 394 exp_dir, 395 cfg.name, 396 cfg.version, 397 cfg.create_tensorboard_logger, 398 cfg.summary_writer_kwargs, 399 cfg.create_wandb_logger, 400 cfg.wandb_logger_kwargs, 401 cfg.create_mlflow_logger, 402 cfg.mlflow_logger_kwargs, 403 cfg.create_dllogger_logger, 404 cfg.dllogger_logger_kwargs, 405 ) 406 407 # add loggers timing callbacks 408 if cfg.log_step_timing: 409 timing_callback = TimingCallback(timer_kwargs=cfg.step_timing_kwargs or {}) 410 trainer.callbacks.insert(0, timing_callback) 411 412 if cfg.ema.enable: 413 ema_callback = EMA( 414 decay=cfg.ema.decay, 415 validate_original_weights=cfg.ema.validate_original_weights, 416 cpu_offload=cfg.ema.cpu_offload, 417 every_n_steps=cfg.ema.every_n_steps, 418 ) 419 trainer.callbacks.append(ema_callback) 420 421 if cfg.create_checkpoint_callback: 422 configure_checkpointing( 423 trainer, log_dir, checkpoint_name, cfg.resume_if_exists, cfg.checkpoint_callback_params 424 ) 425 426 if cfg.disable_validation_on_resume: 427 # extend training loop to skip initial validation when resuming from checkpoint 428 configure_no_restart_validation_training_loop(trainer) 429 430 # Setup a stateless timer for use on clusters. 431 if cfg.max_time_per_run is not None: 432 found_ptl_timer = False 433 for idx, callback in enumerate(trainer.callbacks): 434 if isinstance(callback, Timer): 435 # NOTE: PTL does not expose a `trainer.max_time`. By the time we are in this function, PTL has already setup a timer if the user specifies `trainer.max_time` so best we can do is replace that. 436 # Working: If only `trainer.max_time` is set - it behaves as a normal PTL timer. If only `exp_manager.max_time_per_run` is set - it behaves as a StateLessTimer. If both are set, it also behaves as a StateLessTimer. 
437 logging.warning( 438 f'Found a PTL Timer callback, replacing with a StatelessTimer callback. This will happen if you set trainer.max_time as well as exp_manager.max_time_per_run.' 439 ) 440 trainer.callbacks[idx] = StatelessTimer(cfg.max_time_per_run) 441 found_ptl_timer = True 442 break 443 444 if not found_ptl_timer: 445 trainer.max_time = cfg.max_time_per_run 446 trainer.callbacks.append(StatelessTimer(cfg.max_time_per_run)) 447 448 if is_global_rank_zero(): 449 # Move files_to_copy to folder and add git information if present 450 if cfg.files_to_copy: 451 for _file in cfg.files_to_copy: 452 copy(Path(_file), log_dir) 453 454 # Create files for cmd args and git info 455 with open(log_dir / 'cmd-args.log', 'w', encoding='utf-8') as _file: 456 _file.write(" ".join(sys.argv)) 457 458 # Try to get git hash 459 git_repo, git_hash = get_git_hash() 460 if git_repo: 461 with open(log_dir / 'git-info.log', 'w', encoding='utf-8') as _file: 462 _file.write(f'commit hash: {git_hash}') 463 _file.write(get_git_diff()) 464 465 # Add err_file logging to global_rank zero 466 logging.add_err_file_handler(log_dir / 'nemo_error_log.txt') 467 468 # Add lightning file logging to global_rank zero 469 add_filehandlers_to_pl_logger(log_dir / 'lightning_logs.txt', log_dir / 'nemo_error_log.txt') 470 471 return log_dir 472 473 474 def error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictConfig, Dict]] = None): 475 """ 476 Checks that the passed trainer is compliant with NeMo and exp_manager's passed configuration. Checks that: 477 - Throws error when hydra has changed the working directory. 
This causes issues with lightning's DDP 478 - Throws error when trainer has loggers defined but create_tensorboard_logger or create_wandB_logger 479 or create_mlflow_logger or create_dllogger_logger is True 480 - Prints error messages when 1) run on multi-node and not Slurm, and 2) run on multi-gpu without DDP 481 """ 482 if HydraConfig.initialized() and get_original_cwd() != os.getcwd(): 483 raise ValueError( 484 "Hydra changed the working directory. This interferes with ExpManger's functionality. Please pass " 485 "hydra.run.dir=. to your python script." 486 ) 487 if trainer.logger is not None and ( 488 cfg.create_tensorboard_logger or cfg.create_wandb_logger or cfg.create_mlflow_logger 489 ): 490 raise LoggerMisconfigurationError( 491 "The pytorch lightning trainer that was passed to exp_manager contained a logger, and either " 492 f"create_tensorboard_logger: {cfg.create_tensorboard_logger} or create_wandb_logger: " 493 f"{cfg.create_wandb_logger} or create_mlflow_logger: {cfg.create_mlflow_logger}" 494 f"or create_dllogger_logger: {cfg.create_mlflow_logger} was set to True. " 495 "These can only be used if trainer does not already have a logger." 496 ) 497 if trainer.num_nodes > 1 and not check_slurm(trainer): 498 logging.error( 499 "You are running multi-node training without SLURM handling the processes." 500 " Please note that this is not tested in NeMo and could result in errors." 501 ) 502 if trainer.num_devices > 1 and not isinstance(trainer.strategy, DDPStrategy): 503 logging.error( 504 "You are running multi-gpu without ddp.Please note that this is not tested in NeMo and could result in " 505 "errors." 506 ) 507 508 509 def check_resume( 510 trainer: 'pytorch_lightning.Trainer', 511 log_dir: str, 512 resume_past_end: bool = False, 513 resume_ignore_no_checkpoint: bool = False, 514 dirpath: str = None, 515 ): 516 """Checks that resume=True was used correctly with the arguments pass to exp_manager. 
Sets 517 trainer._checkpoint_connector.resume_from_checkpoint_fit_path as necessary. 518 519 Returns: 520 log_dir (Path): The log_dir 521 exp_dir (str): The base exp_dir without name nor version 522 name (str): The name of the experiment 523 version (str): The version of the experiment 524 525 Raises: 526 NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found. 527 ValueError: If resume is True, and there were more than 1 checkpoint could found. 528 """ 529 530 if not log_dir: 531 raise ValueError(f"Resuming requires the log_dir {log_dir} to be passed to exp_manager") 532 533 # Use <log_dir>/checkpoints/ unless `dirpath` is set 534 checkpoint_dir = Path(dirpath) if dirpath else Path(Path(log_dir) / "checkpoints") 535 536 checkpoint = None 537 end_checkpoints = list(checkpoint_dir.rglob("*end.ckpt")) 538 last_checkpoints = list(checkpoint_dir.rglob("*last.ckpt")) 539 if not checkpoint_dir.exists(): 540 if resume_ignore_no_checkpoint: 541 logging.warning( 542 f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Training from scratch." 543 ) 544 return 545 else: 546 raise NotFoundError(f"There was no checkpoint folder at checkpoint_dir :{checkpoint_dir}. Cannot resume.") 547 elif len(end_checkpoints) > 0: 548 if resume_past_end: 549 if len(end_checkpoints) > 1: 550 if 'mp_rank' in str(end_checkpoints[0]): 551 checkpoint = end_checkpoints[0] 552 else: 553 raise ValueError(f"Multiple checkpoints {end_checkpoints} that matches *end.ckpt.") 554 logging.info(f"Resuming from {end_checkpoints[0]}") 555 else: 556 raise ValueError( 557 f"Found {end_checkpoints[0]} indicating that the last training run has already completed." 558 ) 559 elif not len(last_checkpoints) > 0: 560 if resume_ignore_no_checkpoint: 561 logging.warning(f"There were no checkpoints found in {checkpoint_dir}. Training from scratch.") 562 return 563 else: 564 raise NotFoundError(f"There were no checkpoints found in {checkpoint_dir}. 
Cannot resume.") 565 elif len(last_checkpoints) > 1: 566 if 'mp_rank' in str(last_checkpoints[0]) or 'tp_rank' in str(last_checkpoints[0]): 567 checkpoint = last_checkpoints[0] 568 checkpoint = uninject_model_parallel_rank(checkpoint) 569 else: 570 raise ValueError(f"Multiple checkpoints {last_checkpoints} that matches *last.ckpt.") 571 else: 572 logging.info(f"Resuming from {last_checkpoints[0]}") 573 checkpoint = last_checkpoints[0] 574 575 trainer._checkpoint_connector.resume_from_checkpoint_fit_path = str(checkpoint) 576 577 if is_global_rank_zero(): 578 # Check to see if any files exist that need to be moved 579 files_to_move = [] 580 for child in Path(log_dir).iterdir(): 581 if child.is_file(): 582 files_to_move.append(child) 583 584 if len(files_to_move) > 0: 585 # Move old files to a new folder 586 other_run_dirs = Path(log_dir).glob("run_*") 587 run_count = 0 588 for fold in other_run_dirs: 589 if fold.is_dir(): 590 run_count += 1 591 new_run_dir = Path(Path(log_dir) / f"run_{run_count}") 592 new_run_dir.mkdir() 593 for _file in files_to_move: 594 move(str(_file), str(new_run_dir)) 595 596 597 def check_explicit_log_dir( 598 trainer: 'pytorch_lightning.Trainer', explicit_log_dir: Union[Path, str], exp_dir: str, name: str, version: str 599 ) -> Tuple[Path, str, str, str]: 600 """ Checks that the passed arguments are compatible with explicit_log_dir. 601 602 Returns: 603 log_dir (Path): the log_dir 604 exp_dir (str): the base exp_dir without name nor version 605 name (str): The name of the experiment 606 version (str): The version of the experiment 607 608 Raise: 609 LoggerMisconfigurationError 610 """ 611 if trainer.logger is not None: 612 raise LoggerMisconfigurationError( 613 "The pytorch lightning trainer that was passed to exp_manager contained a logger and explicit_log_dir: " 614 f"{explicit_log_dir} was pass to exp_manager. Please remove the logger from the lightning trainer." 615 ) 616 # Checking only (explicit_log_dir) vs (exp_dir and version). 
617 # The `name` will be used as the actual name of checkpoint/archive. 618 if exp_dir or version: 619 logging.error( 620 f"exp_manager received explicit_log_dir: {explicit_log_dir} and at least one of exp_dir: {exp_dir}, " 621 f"or version: {version}. Please note that exp_dir, name, and version will be ignored." 622 ) 623 if is_global_rank_zero() and Path(explicit_log_dir).exists(): 624 logging.warning(f"Exp_manager is logging to {explicit_log_dir}, but it already exists.") 625 return Path(explicit_log_dir), str(explicit_log_dir), "", "" 626 627 628 def get_log_dir( 629 trainer: 'pytorch_lightning.Trainer', 630 exp_dir: str = None, 631 name: str = None, 632 version: str = None, 633 explicit_log_dir: str = None, 634 use_datetime_version: bool = True, 635 resume_if_exists: bool = False, 636 ) -> Tuple[Path, str, str, str]: 637 """ 638 Obtains the log_dir used for exp_manager. 639 640 Returns: 641 log_dir (Path): the log_dir 642 exp_dir (str): the base exp_dir without name nor version 643 name (str): The name of the experiment 644 version (str): The version of the experiment 645 explicit_log_dir (str): The explicit path to the log folder. Defaults to False. 646 use_datetime_version (bool): Uses date and time as the version of the log folder. Defaults to True. 647 resume_if_exists (bool): if resume_if_exists of the exp_manager's config is enabled or not. When enabled, the 648 version folders would not get created. 649 650 Raise: 651 LoggerMisconfigurationError: If trainer is incompatible with arguments 652 NotFoundError: If resume is True, resume_ignore_no_checkpoint is False, and checkpoints could not be found. 653 ValueError: If resume is True, and there were more than 1 checkpoint could found. 
654 """ 655 if explicit_log_dir: # If explicit log_dir was passed, short circuit 656 return check_explicit_log_dir(trainer, explicit_log_dir, exp_dir, name, version) 657 658 # Default exp_dir to ./nemo_experiments if None was passed 659 _exp_dir = exp_dir 660 if exp_dir is None: 661 _exp_dir = str(Path.cwd() / 'nemo_experiments') 662 663 # If the user has already defined a logger for the trainer, use the logger defaults for logging directory 664 if trainer.logger is not None: 665 if trainer.logger.save_dir: 666 if exp_dir: 667 raise LoggerMisconfigurationError( 668 "The pytorch lightning trainer that was passed to exp_manager contained a logger, the logger's " 669 f"save_dir was not None, and exp_dir ({exp_dir}) was not None. If trainer.logger.save_dir " 670 "exists, exp_manager will use trainer.logger.save_dir as the logging directory and exp_dir " 671 "must be None." 672 ) 673 _exp_dir = trainer.logger.save_dir 674 if name: 675 raise LoggerMisconfigurationError( 676 "The pytorch lightning trainer that was passed to exp_manager contained a logger, and name: " 677 f"{name} was also passed to exp_manager. If the trainer contains a " 678 "logger, exp_manager will use trainer.logger.name, and name passed to exp_manager must be None." 679 ) 680 name = trainer.logger.name 681 version = f"version_{trainer.logger.version}" 682 # Use user-defined exp_dir, project_name, exp_name, and versioning options 683 else: 684 name = name or "default" 685 version = version or os.environ.get(NEMO_ENV_VARNAME_VERSION, None) 686 687 if not version: 688 if resume_if_exists: 689 logging.warning( 690 "No version folders would be created under the log folder as 'resume_if_exists' is enabled." 
691 ) 692 version = None 693 elif is_global_rank_zero(): 694 if use_datetime_version: 695 version = time.strftime('%Y-%m-%d_%H-%M-%S') 696 else: 697 tensorboard_logger = TensorBoardLogger(save_dir=Path(_exp_dir), name=name, version=version) 698 version = f"version_{tensorboard_logger.version}" 699 os.environ[NEMO_ENV_VARNAME_VERSION] = "" if version is None else version 700 701 log_dir = Path(_exp_dir) / Path(str(name)) / Path("" if version is None else str(version)) 702 return log_dir, str(_exp_dir), name, version 703 704 705 def get_git_hash(): 706 """ 707 Helper function that tries to get the commit hash if running inside a git folder 708 709 returns: 710 Bool: Whether the git subprocess ran without error 711 str: git subprocess output or error message 712 """ 713 try: 714 return ( 715 True, 716 subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT).decode(), 717 ) 718 except subprocess.CalledProcessError as err: 719 return False, "{}\n".format(err.output.decode("utf-8")) 720 721 722 def get_git_diff(): 723 """ 724 Helper function that tries to get the git diff if running inside a git folder 725 726 returns: 727 Bool: Whether the git subprocess ran without error 728 str: git subprocess output or error message 729 """ 730 try: 731 return subprocess.check_output(['git', 'diff'], stderr=subprocess.STDOUT).decode() 732 except subprocess.CalledProcessError as err: 733 return "{}\n".format(err.output.decode("utf-8")) 734 735 736 def configure_loggers( 737 trainer: 'pytorch_lightning.Trainer', 738 exp_dir: [Path, str], 739 name: str, 740 version: str, 741 create_tensorboard_logger: bool, 742 summary_writer_kwargs: dict, 743 create_wandb_logger: bool, 744 wandb_kwargs: dict, 745 create_mlflow_logger: bool, 746 mlflow_kwargs: dict, 747 create_dllogger_logger: bool, 748 dllogger_kwargs: dict, 749 ): 750 """ 751 Creates TensorboardLogger and/or WandBLogger / MLFlowLogger / DLlogger and attach them to trainer. 
752 Raises ValueError if summary_writer_kwargs or wandb_kwargs are misconfigured. 753 """ 754 # Potentially create tensorboard logger and/or WandBLogger / MLFlowLogger / DLLogger 755 logger_list = [] 756 if create_tensorboard_logger: 757 if summary_writer_kwargs is None: 758 summary_writer_kwargs = {} 759 elif "log_dir" in summary_writer_kwargs: 760 raise ValueError( 761 "You cannot pass `log_dir` as part of `summary_writer_kwargs`. `log_dir` is handled by lightning's " 762 "TensorBoardLogger logger." 763 ) 764 tensorboard_logger = TensorBoardLogger(save_dir=exp_dir, name=name, version=version, **summary_writer_kwargs) 765 logger_list.append(tensorboard_logger) 766 logging.info("TensorboardLogger has been set up") 767 768 if create_wandb_logger: 769 if wandb_kwargs is None: 770 wandb_kwargs = {} 771 if "name" not in wandb_kwargs and "project" not in wandb_kwargs: 772 raise ValueError("name and project are required for wandb_logger") 773 774 # Update the wandb save_dir 775 if wandb_kwargs.get('save_dir', None) is None: 776 wandb_kwargs['save_dir'] = exp_dir 777 os.makedirs(wandb_kwargs['save_dir'], exist_ok=True) 778 wandb_logger = WandbLogger(version=version, **wandb_kwargs) 779 780 logger_list.append(wandb_logger) 781 logging.info("WandBLogger has been set up") 782 783 if create_mlflow_logger: 784 mlflow_logger = MLFlowLogger(run_name=version, **mlflow_kwargs) 785 786 logger_list.append(mlflow_logger) 787 logging.info("MLFlowLogger has been set up") 788 789 if create_dllogger_logger: 790 dllogger_logger = DLLogger(**dllogger_kwargs) 791 792 logger_list.append(dllogger_logger) 793 logging.info("DLLogger has been set up") 794 795 trainer._logger_connector.configure_logger(logger_list) 796 797 798 class NeMoModelCheckpoint(ModelCheckpoint): 799 """ Light wrapper around Lightning's ModelCheckpoint to force a saved checkpoint on train_end 800 """ 801 802 def __init__( 803 self, 804 always_save_nemo: bool = False, 805 save_nemo_on_train_end: bool = True, 806 
save_best_model: bool = False, 807 postfix: str = ".nemo", 808 n_resume: bool = False, 809 model_parallel_size: int = None, 810 **kwargs, 811 ): 812 # Parse and store "extended" parameters: save_best model and postfix. 813 self.always_save_nemo = always_save_nemo 814 self.save_nemo_on_train_end = save_nemo_on_train_end 815 self.save_best_model = save_best_model 816 if self.save_best_model and not self.save_nemo_on_train_end: 817 logging.warning( 818 ( 819 "Found save_best_model is True and save_nemo_on_train_end is False. " 820 "Set save_nemo_on_train_end to True to automatically save the best model." 821 ) 822 ) 823 self.postfix = postfix 824 self.previous_best_path = "" 825 self.model_parallel_size = model_parallel_size 826 827 # `prefix` is deprecated 828 if 'prefix' in kwargs: 829 self.prefix = kwargs.pop('prefix') 830 else: 831 self.prefix = "" 832 833 # Call the parent class constructor with the remaining kwargs. 834 super().__init__(**kwargs) 835 836 if self.save_top_k != -1 and n_resume: 837 logging.debug("Checking previous runs") 838 self.nemo_topk_check_previous_run() 839 840 def nemo_topk_check_previous_run(self): 841 try: 842 self.best_k_models 843 self.kth_best_model_path 844 self.best_model_score 845 self.best_model_path 846 except AttributeError: 847 raise AttributeError("Lightning's ModelCheckpoint was updated. 
NeMoModelCheckpoint will need an update.") 848 self.best_k_models = {} 849 self.kth_best_model_path = "" 850 self.best_model_score = None 851 self.best_model_path = "" 852 853 checkpoints = list(Path(self.dirpath).rglob("*.ckpt")) 854 for checkpoint in checkpoints: 855 if 'mp_rank' in str(checkpoint) or 'tp_rank' in str(checkpoint): 856 checkpoint = uninject_model_parallel_rank(checkpoint) 857 checkpoint = str(checkpoint) 858 if checkpoint[-10:] == '-last.ckpt': 859 continue 860 index = checkpoint.find(self.monitor) + len(self.monitor) + 1 # Find monitor in str + 1 for '=' 861 if index != -1: 862 match = re.search('[A-z]', checkpoint[index:]) 863 if match: 864 value = checkpoint[index : index + match.start() - 1] # -1 due to separator hypen 865 self.best_k_models[checkpoint] = float(value) 866 if len(self.best_k_models) < 1: 867 return # No saved checkpoints yet 868 869 _reverse = False if self.mode == "min" else True 870 871 best_k_models = sorted(self.best_k_models, key=self.best_k_models.get, reverse=_reverse) 872 873 ### This section should be ok as rank zero will delete all excess checkpoints, since all other ranks are 874 ### instantiated after rank zero. models_to_delete should be 0 for all other ranks. 
875 if self.model_parallel_size is not None: 876 models_to_delete = len(best_k_models) - self.model_parallel_size * self.save_top_k 877 else: 878 models_to_delete = len(best_k_models) - self.save_top_k 879 logging.debug(f'Number of models to delete: {models_to_delete}') 880 for _ in range(models_to_delete): 881 model = best_k_models.pop(-1) 882 self.best_k_models.pop(model) 883 self._del_model_without_trainer(model) 884 logging.debug(f"Removed checkpoint: {model}") 885 886 self.kth_best_model_path = best_k_models[-1] 887 self.best_model_path = best_k_models[0] 888 self.best_model_score = self.best_k_models[self.best_model_path] 889 890 def on_save_checkpoint(self, trainer, pl_module, checkpoint): 891 # output = None 892 output = super().on_save_checkpoint(trainer, pl_module, checkpoint) 893 if not self.always_save_nemo: 894 return output 895 else: 896 # Load the best model and then re-save it 897 app_state = AppState() 898 if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1: 899 raise ValueError(f'always_save_nemo is not implemented for model parallel models.') 900 # since we are creating tarfile artifacts we need to update .nemo path 901 app_state.model_restore_path = os.path.abspath( 902 os.path.expanduser(os.path.join(self.dirpath, self.prefix + self.postfix)) 903 ) 904 if self.save_best_model: 905 if not os.path.exists(self.best_model_path): 906 return output 907 908 if self.best_model_path == self.previous_best_path: 909 return output 910 911 self.previous_model_path = self.best_model_path 912 old_state_dict = deepcopy(pl_module.state_dict()) 913 checkpoint = torch.load(self.best_model_path, map_location='cpu') 914 if 'state_dict' in checkpoint: 915 checkpoint = checkpoint['state_dict'] 916 # get a new instanace of the model 917 pl_module.load_state_dict(checkpoint, strict=True) 918 pl_module.save_to(save_path=app_state.model_restore_path) 919 pl_module.load_state_dict(old_state_dict, strict=True) 920 else: 921 
pl_module.save_to(save_path=app_state.model_restore_path) 922 return output 923 924 def on_train_end(self, trainer, pl_module): 925 if trainer.fast_dev_run: 926 return None 927 928 # check if we need to save a last checkpoint manually as validation isn't always run based on the interval 929 if self.save_last and trainer.val_check_interval != 0: 930 should_save_last_checkpoint = False 931 if isinstance(trainer.val_check_interval, float) and trainer.val_check_interval % trainer.global_step != 0: 932 should_save_last_checkpoint = True 933 if isinstance(trainer.val_check_interval, int) and trainer.global_step % trainer.val_check_interval != 0: 934 should_save_last_checkpoint = True 935 if should_save_last_checkpoint: 936 monitor_candidates = self._monitor_candidates(trainer) 937 super()._save_last_checkpoint(trainer, monitor_candidates) 938 # Call parent on_train_end() to save the -last checkpoint 939 super().on_train_end(trainer, pl_module) 940 941 # Load the best model and then re-save it 942 if self.save_best_model: 943 # wait for all processes 944 trainer.strategy.barrier("SaveBestCheckpointConnector.resume_end") 945 if self.best_model_path == "": 946 logging.warning( 947 f"{self} was told to save the best checkpoint at the end of training, but no saved checkpoints " 948 "were found. Saving latest model instead." 
949 ) 950 else: 951 self.best_model_path = trainer.strategy.broadcast(self.best_model_path) 952 trainer._checkpoint_connector.restore(self.best_model_path) 953 954 if self.save_nemo_on_train_end: 955 pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix)) 956 957 def _del_model_without_trainer(self, filepath: str) -> None: 958 app_state = AppState() 959 if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1: 960 # filepath needs to be updated to include mp_rank 961 filepath = inject_model_parallel_rank(filepath) 962 963 # each model parallel rank needs to remove its model 964 if is_global_rank_zero() or (app_state.model_parallel_size is not None and app_state.data_parallel_rank == 0): 965 try: 966 self._fs.rm(filepath) 967 logging.info(f"Removed checkpoint: {filepath}") 968 except: 969 logging.info(f"Tried to remove checkpoint: {filepath} but failed.") 970 971 def _ema_callback(self, trainer: 'pytorch_lightning.Trainer') -> Optional[EMA]: 972 ema_callback = None 973 for callback in trainer.callbacks: 974 if isinstance(callback, EMA): 975 ema_callback = callback 976 return ema_callback 977 978 def _save_checkpoint(self, trainer: 'pytorch_lightning.Trainer', filepath: str) -> None: 979 ema_callback = self._ema_callback(trainer) 980 if ema_callback is not None: 981 with ema_callback.save_original_optimizer_state(trainer): 982 super()._save_checkpoint(trainer, filepath) 983 984 # save EMA copy of the model as well. 
985 with ema_callback.save_ema_model(trainer): 986 filepath = self._ema_format_filepath(filepath) 987 if self.verbose: 988 rank_zero_info(f"Saving EMA weights to separate checkpoint {filepath}") 989 super()._save_checkpoint(trainer, filepath) 990 else: 991 super()._save_checkpoint(trainer, filepath) 992 993 def _ema_format_filepath(self, filepath: str) -> str: 994 return filepath.replace(self.FILE_EXTENSION, f'-EMA{self.FILE_EXTENSION}') 995 996 997 def configure_checkpointing( 998 trainer: 'pytorch_lightning.Trainer', log_dir: Path, name: str, resume: bool, params: 'DictConfig', 999 ): 1000 """ Adds ModelCheckpoint to trainer. Raises CheckpointMisconfigurationError if trainer already has a ModelCheckpoint 1001 callback 1002 """ 1003 for callback in trainer.callbacks: 1004 if isinstance(callback, ModelCheckpoint): 1005 raise CheckpointMisconfigurationError( 1006 "The pytorch lightning trainer that was passed to exp_manager contained a ModelCheckpoint " 1007 "and create_checkpoint_callback was set to True. Please either set create_checkpoint_callback " 1008 "to False, or remove ModelCheckpoint from the lightning trainer" 1009 ) 1010 # Create the callback and attach it to trainer 1011 if "filepath" in params: 1012 if params.filepath is not None: 1013 logging.warning("filepath is deprecated. 
Please switch to dirpath and filename instead") 1014 if params.dirpath is None: 1015 params.dirpath = Path(params.filepath).parent 1016 if params.filename is None: 1017 params.filename = Path(params.filepath).name 1018 with open_dict(params): 1019 del params["filepath"] 1020 if params.dirpath is None: 1021 params.dirpath = Path(log_dir / 'checkpoints') 1022 if params.filename is None: 1023 params.filename = f'{name}--{{{params.monitor}:.4f}}-{{epoch}}' 1024 if params.prefix is None: 1025 params.prefix = name 1026 NeMoModelCheckpoint.CHECKPOINT_NAME_LAST = params.filename + '-last' 1027 1028 logging.debug(params.dirpath) 1029 logging.debug(params.filename) 1030 logging.debug(params.prefix) 1031 1032 if "val" in params.monitor: 1033 if ( 1034 trainer.max_epochs is not None 1035 and trainer.max_epochs != -1 1036 and trainer.max_epochs < trainer.check_val_every_n_epoch 1037 ): 1038 logging.error( 1039 "The checkpoint callback was told to monitor a validation value but trainer.max_epochs(" 1040 f"{trainer.max_epochs}) was less than trainer.check_val_every_n_epoch({trainer.check_val_every_n_epoch}" 1041 f"). It is very likely this run will fail with ModelCheckpoint(monitor='{params.monitor}') not found " 1042 "in the returned metrics. Please ensure that validation is run within trainer.max_epochs." 1043 ) 1044 elif trainer.max_steps is not None and trainer.max_steps != -1: 1045 logging.warning( 1046 "The checkpoint callback was told to monitor a validation value and trainer's max_steps was set to " 1047 f"{trainer.max_steps}. Please ensure that max_steps will run for at least " 1048 f"{trainer.check_val_every_n_epoch} epochs to ensure that checkpointing will not error out." 
1049 ) 1050 1051 checkpoint_callback = NeMoModelCheckpoint(n_resume=resume, **params) 1052 checkpoint_callback.last_model_path = trainer._checkpoint_connector.resume_from_checkpoint_fit_path or "" 1053 if 'mp_rank' in checkpoint_callback.last_model_path or 'tp_rank' in checkpoint_callback.last_model_path: 1054 checkpoint_callback.last_model_path = uninject_model_parallel_rank(checkpoint_callback.last_model_path) 1055 trainer.callbacks.append(checkpoint_callback) 1056 1057 1058 def check_slurm(trainer): 1059 try: 1060 return trainer.accelerator_connector.is_slurm_managing_tasks 1061 except AttributeError: 1062 return False 1063 1064 1065 class StatelessTimer(Timer): 1066 """Extension of PTL timers to be per run.""" 1067 1068 def __init__(self, duration: timedelta = None, interval: str = Interval.step, verbose: bool = True,) -> None: 1069 super().__init__(duration, interval, verbose) 1070 1071 # Override PTL Timer's state dict to not store elapsed time information so that we can restore and continue training. 1072 def state_dict(self) -> Dict[str, Any]: 1073 return {} 1074 1075 def load_state_dict(self, state_dict: Dict[str, Any]) -> None: 1076 return 1077 1078 1079 def configure_no_restart_validation_training_loop(trainer: pytorch_lightning.Trainer) -> None: 1080 if type(trainer.fit_loop.epoch_loop) != TrainingEpochLoop: 1081 warnings.warn("Detected custom epoch loop. Skipping no validation on restart support.", UserWarning) 1082 return 1083 loop = SkipResumeTrainingValidationLoop(trainer.min_steps, trainer.max_steps) 1084 loop.trainer = trainer 1085 trainer.fit_loop.epoch_loop = loop 1086 1087 1088 class SkipResumeTrainingValidationLoop(TrainingEpochLoop): 1089 """ 1090 Extend the PTL Epoch loop to skip validating when resuming. 1091 This happens when resuming a checkpoint that has already run validation, but loading restores 1092 the training state before validation has run. 
1093 """ 1094 1095 def _should_check_val_fx(self) -> bool: 1096 if self.restarting and self.global_step % self.trainer.val_check_batch == 0: 1097 return False 1098 return super()._should_check_val_fx() 1099 1100 1101 def clean_exp_ckpt(exp_log_dir: Union[str, Path], remove_ckpt: bool = True, remove_nemo: bool = False): 1102 """ 1103 Helper method that removes Pytorch Lightning .ckpt files or NeMo .nemo files from the checkpoint directory 1104 1105 Args: 1106 exp_log_dir: str path to the root directory of the current experiment. 1107 remove_ckpt: bool, whether to remove all *.ckpt files in the checkpoints directory. 1108 remove_nemo: bool, whether to remove all *.nemo files in the checkpoints directory. 1109 """ 1110 exp_log_dir = str(exp_log_dir) 1111 1112 if remove_ckpt: 1113 logging.info("Deleting *.ckpt files ...") 1114 ckpt_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.ckpt")) 1115 for filepath in ckpt_files: 1116 os.remove(filepath) 1117 logging.info(f"Deleted file : {filepath}") 1118 1119 if remove_nemo: 1120 logging.info("Deleting *.nemo files ...") 1121 nemo_files = glob.glob(os.path.join(exp_log_dir, "checkpoints", "*.nemo")) 1122 for filepath in nemo_files: 1123 os.remove(filepath) 1124 logging.info(f"Deleted file : {filepath}") 1125 [end of nemo/utils/exp_manager.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. 
<patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
eee715f831f2b088075f75cc7c95de60f4ef1d38
EMA Doesn't delete previous EMA ckpts when k > 0 for checkpointing **Describe the bug** EMA saves a separate EMA based ckpt when normally saving. The problem is when saving a set amount of checkpoints (i.e k=5) the older EMA ckpts are not being deleted. This means there is an excess of EMA based ckpts. What is expected is that older EMA ckpts are deleted. **Proposed Solution** Overriding the code is tricky, as the call to remove ckpts is found within functions, like such: https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L652 https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L671 https://github.com/Lightning-AI/lightning/blob/master/src/pytorch_lightning/callbacks/model_checkpoint.py#L713 My suggestion would be to move this code into a separate function that can be overridden in the [NeMoModelCheckpoint](https://github.com/NVIDIA/NeMo/blob/4d3e99f986fd9354b5528a3e9c82fe01ec2df916/nemo/utils/exp_manager.py#L745) class by introducing a `delete_checkpoint` function. In this function we will also be able to delete the EMA weights.
cc @carmocca Ideally, we would find a better solution. However, since that would require a larger refactor - to `ModelCheckpoint` and/or NeMo - and the `NemoModelCheckpoint` callback already relies heavily on overriding protected methods, I'm fine with adding one more. That's sounds like a good interim measure. PR created here: https://github.com/Lightning-AI/lightning/pull/16067 Hopefully this can make it into the next bugfix release if possible. @Borda, do you agree with cherry picking this into a 1.8.x release? > @Borda, do you agree with cherry picking this into a 1.8.x release? yes, good with me
2023-01-03T11:05:25Z
<patch> diff --git a/nemo/collections/common/callbacks/ema.py b/nemo/collections/common/callbacks/ema.py --- a/nemo/collections/common/callbacks/ema.py +++ b/nemo/collections/common/callbacks/ema.py @@ -13,13 +13,13 @@ # limitations under the License. import contextlib import copy -import logging import os import threading from typing import Any, Dict, Iterable import pytorch_lightning as pl import torch +from lightning_utilities.core.rank_zero import rank_zero_info from pytorch_lightning import Callback from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -117,25 +117,26 @@ def on_load_checkpoint( ) -> None: checkpoint_callback = trainer.checkpoint_callback - if trainer.ckpt_path and checkpoint_callback is not None and 'NeMo' in type(checkpoint_callback).__name__: + # use the connector as NeMo calls the connector directly in the exp_manager when restoring. + connector = trainer._checkpoint_connector + ckpt_path = connector.resume_checkpoint_path + + if ckpt_path and checkpoint_callback is not None and 'NeMo' in type(checkpoint_callback).__name__: ext = checkpoint_callback.FILE_EXTENSION - if trainer.ckpt_path.endswith(f'-EMA{ext}'): - logging.info( + if ckpt_path.endswith(f'-EMA{ext}'): + rank_zero_info( "loading EMA based weights. " "The callback will treat the loaded EMA weights as the main weights" " and create a new EMA copy when training." ) return - ema_path = trainer.ckpt_path.replace(ext, f'-EMA{ext}') + ema_path = ckpt_path.replace(ext, f'-EMA{ext}') if os.path.exists(ema_path): ema_state_dict = torch.load(ema_path, map_location=torch.device('cpu')) - # this is wrong, basically when we save the EMA weights, optimizer_states actually contains the model parameters - # as we swapped the model parameters with the state dict parameters. 
- # we could enforce that if you trained with EMA and want to continue training checkpoint['optimizer_states'] = ema_state_dict['optimizer_states'] del ema_state_dict - logging.info("EMA state has been restored.") + rank_zero_info("EMA state has been restored.") else: raise MisconfigurationException( "Unable to find the associated EMA weights when re-loading, " diff --git a/nemo/utils/exp_manager.py b/nemo/utils/exp_manager.py --- a/nemo/utils/exp_manager.py +++ b/nemo/utils/exp_manager.py @@ -990,6 +990,14 @@ def _save_checkpoint(self, trainer: 'pytorch_lightning.Trainer', filepath: str) else: super()._save_checkpoint(trainer, filepath) + def _remove_checkpoint(self, trainer: "pytorch_lightning.Trainer", filepath: str) -> None: + super()._remove_checkpoint(trainer, filepath) + ema_callback = self._ema_callback(trainer) + if ema_callback is not None: + # remove EMA copy of the state dict as well. + filepath = self._ema_format_filepath(filepath) + super()._remove_checkpoint(trainer, filepath) + def _ema_format_filepath(self, filepath: str) -> str: return filepath.replace(self.FILE_EXTENSION, f'-EMA{self.FILE_EXTENSION}') </patch>
diff --git a/tests/collections/common/test_ema.py b/tests/collections/common/test_ema.py --- a/tests/collections/common/test_ema.py +++ b/tests/collections/common/test_ema.py @@ -27,7 +27,10 @@ from nemo.collections.common.callbacks.ema import EMAOptimizer from nemo.core import ModelPT from nemo.utils.exp_manager import exp_manager -from tests.collections.nlp.test_gpt_model import DEVICE_CAPABILITY + +DEVICE_CAPABILITY = None +if torch.cuda.is_available(): + DEVICE_CAPABILITY = torch.cuda.get_device_capability() def extract_ema_weights(pl_module, trainer): @@ -69,13 +72,22 @@ def val_dataloader(self): dataset = RandomDataset(32, 16) return torch.utils.data.DataLoader(dataset, batch_size=2) + def test_dataloader(self): + dataset = RandomDataset(32, 16) + dl = torch.utils.data.DataLoader(dataset, batch_size=2) + self._test_names = ['test_{}_'.format(idx) for idx in range(len(dl))] + return dl + def forward(self, batch): return self.l1(self.bn(batch)).sum() + def training_step(self, batch, batch_idx): + return self(batch) + def validation_step(self, batch, batch_idx): return self(batch) - def training_step(self, batch, batch_idx): + def test_step(self, batch, batch_idx): return self(batch) def configure_optimizers(self): @@ -90,6 +102,9 @@ def setup_training_data(self, train_data_config: Union[DictConfig, Dict]): def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]): pass + def setup_test_data(self, val_data_config: Union[DictConfig, Dict]): + pass + def validation_epoch_end(self, loss): self.log("val_loss", torch.stack(loss).mean()) @@ -248,6 +263,27 @@ def test_exp_manager_ema_weights(self, tmpdir): for saved_weight, ema_weight in zip(duplicate_model.state_dict().values(), ema_weights): assert torch.allclose(saved_weight.cpu(), ema_weight.cpu()) + @pytest.mark.unit + def test_exp_manager_ema_weights_topk(self, tmpdir): + """Test to ensure that EMA correctly ensures we only keep topk checkpoints.""" + tmp_path = tmpdir / "exp_manager_test" + model 
= ExampleModel() + save_top_k = 3 + + trainer = Trainer(max_epochs=10, enable_checkpointing=False, logger=False, devices=1) + exp_manager( + trainer, + { + "ema": {"enable": True}, + "explicit_log_dir": str(tmp_path), + "checkpoint_callback_params": {"save_top_k": save_top_k}, + }, + ) + trainer.fit(model) + + # we save 3 checkpoints for the model, 3 accompanied EMA weights, the last checkpoint and nemo model. + assert len(os.listdir(tmp_path / "checkpoints/")) == (save_top_k + 1) * 2 + 1 + class TestEMATrain: @pytest.mark.unit @@ -320,6 +356,23 @@ def run_training_test(self, accumulate_grad_batches, validate_original_weights, trainer.callbacks.insert(0, EMAValidationAssertCallback()) trainer.fit(model=model, val_dataloaders=model.train_dataloader()) + @pytest.mark.unit + def test_ema_run_with_save_best_model(self, tmpdir): + """Test to ensure that we save the model correctly when save best model is set to True.""" + tmp_path = tmpdir / "exp_manager_test" + model = ExampleModel() + + trainer = Trainer(max_epochs=1, enable_checkpointing=False, logger=False, devices=1, limit_train_batches=1) + exp_manager( + trainer, + { + "ema": {"enable": True}, + "explicit_log_dir": str(tmp_path), + "checkpoint_callback_params": {"save_best_model": True}, + }, + ) + trainer.fit(model) + class EMAAssertCallback(Callback): def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
1.0
NVIDIA__NeMo-6097
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spectrogram Enhancer doesn't generalize to spectrogram lengths unseen during training **Describe the bug** If one trains TTS spectrogram enhancer (#5565) on short spectrograms, at inference time it doesn't generalize to longer ones. A patch in the beginning gets enhanced but further frames do not. Example (before, after): ![before](https://user-images.githubusercontent.com/8864149/218520151-230501b1-4e9d-4307-b51e-526440b915c8.png) ![after](https://user-images.githubusercontent.com/8864149/218520199-22187a4b-9cf8-4ac9-b3ce-07c320d3d3b6.png) **Steps/Code to reproduce bug** 1. Train a spectrogram enhancer 2. Apply it to a spectrogram that's longer than anything from the training set 3. Only a patch in the beginning gets enhanced **Expected behavior** The whole spectrogram should have got additional details, not just the first patch </issue> <code> [start of README.rst] 1 2 |status| |documentation| |license| |lgtm_grade| |lgtm_alerts| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 .. |documentation| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 9 :alt: Documentation 10 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 11 12 .. |license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 13 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 14 :alt: NeMo core license and license for collections in this repo 15 16 .. |lgtm_grade| image:: https://img.shields.io/lgtm/grade/python/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 17 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/context:python 18 :alt: Language grade: Python 19 20 .. 
|lgtm_alerts| image:: https://img.shields.io/lgtm/alerts/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 21 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/alerts/ 22 :alt: Total alerts 23 24 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 25 :target: https://github.com/psf/black 26 :alt: Code style: black 27 28 .. _main-readme: 29 30 **NVIDIA NeMo** 31 =============== 32 33 Introduction 34 ------------ 35 36 NVIDIA NeMo is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), 37 text-to-speech synthesis (TTS), large language models (LLMs), and 38 natural language processing (NLP). 39 The primary objective of NeMo is to help researchers from industry and academia to reuse prior work (code and pretrained models) 40 and make it easier to create new `conversational AI models <https://developer.nvidia.com/conversational-ai#started>`_. 41 42 All NeMo models are trained with `Lightning <https://github.com/Lightning-AI/lightning>`_ and 43 training is automatically scalable to 1000s of GPUs. 44 Additionally, NeMo Megatron LLM models can be trained up to 1 trillion parameters using tensor and pipeline model parallelism. 45 NeMo models can be optimized for inference and deployed for production use-cases with `NVIDIA Riva <https://developer.nvidia.com/riva>`_. 46 47 Getting started with NeMo is simple. 48 State of the Art pretrained NeMo models are freely available on `HuggingFace Hub <https://huggingface.co/models?library=nemo&sort=downloads&search=nvidia>`_ and 49 `NVIDIA NGC <https://catalog.ngc.nvidia.com/models?query=nemo&orderBy=weightPopularDESC>`_. 50 These models can be used to transcribe audio, synthesize speech, or translate text in a just a few lines of code. 51 52 We have have extensive `tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_ that 53 can all be run on `Google Colab <https://colab.research.google.com>`_. 
54 55 For advanced users that want to train NeMo models from scratch or finetune existing NeMo models 56 we have a full suite of `example scripts <https://github.com/NVIDIA/NeMo/tree/main/examples>`_ that support multi-GPU/multi-node training. 57 58 Also see our `introductory video <https://www.youtube.com/embed/wBgpMf_KQVw>`_ for a high level overview of NeMo. 59 60 Key Features 61 ------------ 62 63 * Speech processing 64 * `HuggingFace Space for Audio Transcription (File, Microphone and YouTube) <https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id>`_ 65 * `Automatic Speech Recognition (ASR) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html>`_ 66 * Supported models: Jasper, QuartzNet, CitriNet, Conformer-CTC, Conformer-Transducer, Squeezeformer-CTC, Squeezeformer-Transducer, ContextNet, LSTM-Transducer (RNNT), LSTM-CTC, FastConformer-CTC, FastConformer-Transducer... 67 * Supports CTC and Transducer/RNNT losses/decoders 68 * NeMo Original `Multi-blank Transducers <https://arxiv.org/abs/2211.03541>`_ 69 * Beam Search decoding 70 * `Language Modelling for ASR <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html>`_: N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer 71 * Streaming and Buffered ASR (CTC/Transducer) - `Chunked Inference Examples <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference>`_ 72 * `Support of long audios for Conformer with memory efficient local attention <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio>`_ 73 * `Speech Classification, Speech Command Recognition and Language Identification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html>`_: MatchboxNet (Command Recognition), AmberNet (LangID) 74 * `Voice activity Detection (VAD) 
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad>`_: MarbleNet 75 * ASR with VAD Inference - `Example <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad>`_ 76 * `Speaker Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html>`_: TitaNet, ECAPA_TDNN, SpeakerNet 77 * `Speaker Diarization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html>`_ 78 * Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet 79 * Neural Diarizer: MSDD (Multi-scale Diarization Decoder) 80 * `Speech Intent Detection and Slot Filling <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html>`_: Conformer-Transformer 81 * `Pretrained models on different languages. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_: English, Spanish, German, Russian, Chinese, French, Italian, Polish, ... 82 * `NGC collection of pre-trained speech processing models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_ 83 * Natural Language Processing 84 * `NeMo Megatron pre-training of Large Language Models <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/nemo_megatron/intro.html>`_ 85 * `Neural Machine Translation (NMT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/machine_translation/machine_translation.html>`_ 86 * `Punctuation and Capitalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html>`_ 87 * `Token classification (named entity recognition) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/token_classification.html>`_ 88 * `Text classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_classification.html>`_ 89 * `Joint Intent and Slot Classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/joint_intent_slot.html>`_ 90 * `Question answering <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/question_answering.html>`_ 91 * `GLUE benchmark <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/glue_benchmark.html>`_ 92 * `Information retrieval <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/information_retrieval.html>`_ 93 * `Entity Linking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/entity_linking.html>`_ 94 * `Dialogue State Tracking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/sgd_qa.html>`_ 95 * `Prompt Learning <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html>`_ 96 * `NGC collection of pre-trained NLP models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_nlp>`_ 97 * `Synthetic Tabular Data Generation <https://developer.nvidia.com/blog/generating-synthetic-data-with-transformers-a-solution-for-enterprise-data-challenges/>`_ 98 * `Speech synthesis (TTS) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#>`_ 99 * Spectrogram generation: Tacotron2, GlowTTS, TalkNet, FastPitch, FastSpeech2, Mixer-TTS, Mixer-TTS-X 100 * Vocoders: WaveGlow, SqueezeWave, UniGlow, MelGAN, HiFiGAN, UnivNet 101 * End-to-end speech generation: FastPitch_HifiGan_E2E, FastSpeech2_HifiGan_E2E, VITS 102 * `NGC collection of pre-trained TTS models. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_tts>`_ 103 * `Tools <https://github.com/NVIDIA/NeMo/tree/stable/tools>`_ 104 * `Text Processing (text normalization and inverse text normalization) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html>`_ 105 * `CTC-Segmentation tool <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/ctc_segmentation.html>`_ 106 * `Speech Data Explorer <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/speech_data_explorer.html>`_: a dash-based tool for interactive exploration of ASR/TTS datasets 107 108 109 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 110 111 Requirements 112 ------------ 113 114 1) Python 3.8 or above 115 2) Pytorch 1.10.0 or above 116 3) NVIDIA GPU for training 117 118 Documentation 119 ------------- 120 121 .. |main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 122 :alt: Documentation Status 123 :scale: 100% 124 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 125 126 .. 
|stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 127 :alt: Documentation Status 128 :scale: 100% 129 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 130 131 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 132 | Version | Status | Description | 133 +=========+=============+==========================================================================================================================================+ 134 | Latest | |main| | `Documentation of the latest (i.e. main) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/>`_ | 135 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 136 | Stable | |stable| | `Documentation of the stable (i.e. most recent release) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/>`_ | 137 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 138 139 Tutorials 140 --------- 141 A great way to start with NeMo is by checking `one of our tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_. 142 143 Getting help with NeMo 144 ---------------------- 145 FAQ can be found on NeMo's `Discussions board <https://github.com/NVIDIA/NeMo/discussions>`_. You are welcome to ask questions or start discussions there. 146 147 148 Installation 149 ------------ 150 151 Conda 152 ~~~~~ 153 154 We recommend installing NeMo in a fresh Conda environment. 155 156 .. code-block:: bash 157 158 conda create --name nemo python==3.8 159 conda activate nemo 160 161 Install PyTorch using their `configurator <https://pytorch.org/get-started/locally/>`_. 
162 163 .. code-block:: bash 164 165 conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch 166 167 .. note:: 168 169 The command used to install PyTorch may depend on your system. 170 171 Pip 172 ~~~ 173 Use this installation mode if you want the latest released version. 174 175 .. code-block:: bash 176 177 apt-get update && apt-get install -y libsndfile1 ffmpeg 178 pip install Cython 179 pip install nemo_toolkit['all'] 180 181 .. note:: 182 183 Depending on the shell used, you may need to use ``"nemo_toolkit[all]"`` instead in the above command. 184 185 Pip from source 186 ~~~~~~~~~~~~~~~ 187 Use this installation mode if you want the a version from particular GitHub branch (e.g main). 188 189 .. code-block:: bash 190 191 apt-get update && apt-get install -y libsndfile1 ffmpeg 192 pip install Cython 193 python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all] 194 195 196 From source 197 ~~~~~~~~~~~ 198 Use this installation mode if you are contributing to NeMo. 199 200 .. code-block:: bash 201 202 apt-get update && apt-get install -y libsndfile1 ffmpeg 203 git clone https://github.com/NVIDIA/NeMo 204 cd NeMo 205 ./reinstall.sh 206 207 .. note:: 208 209 If you only want the toolkit without additional conda-based dependencies, you may replace ``reinstall.sh`` 210 with ``pip install -e .`` when your PWD is the root of the NeMo repository. 211 212 RNNT 213 ~~~~ 214 Note that RNNT requires numba to be installed from conda. 215 216 .. code-block:: bash 217 218 conda remove numba 219 pip uninstall numba 220 conda install -c conda-forge numba 221 222 NeMo Megatron 223 ~~~~~~~~~~~~~ 224 NeMo Megatron training requires NVIDIA Apex to be installed. 225 Install it manually if not using the NVIDIA PyTorch container. 226 227 .. 
code-block:: bash 228 229 git clone https://github.com/ericharper/apex.git 230 cd apex 231 git checkout nm_v1.15.0 232 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_layer_norm" --global-option="--distributed_adam" --global-option="--deprecated_fused_adam" ./ 233 234 Transformer Engine 235 ~~~~~~~~~~~~~~~~~~ 236 NeMo Megatron GPT has been integrated with `NVIDIA Transformer Engine <https://github.com/NVIDIA/TransformerEngine>`_ 237 Transformer Engine enables FP8 training on NVIDIA Hopper GPUs. 238 `Install <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/installation.html>`_ it manually if not using the NVIDIA PyTorch container. 239 240 .. note:: 241 242 Transformer Engine requires PyTorch to be built with CUDA 11.8. 243 244 NeMo Text Processing 245 ~~~~~~~~~~~~~~~~~~~~ 246 NeMo Text Processing, specifically (Inverse) Text Normalization, is now a separate repository `https://github.com/NVIDIA/NeMo-text-processing <https://github.com/NVIDIA/NeMo-text-processing>`_. 247 248 Docker containers: 249 ~~~~~~~~~~~~~~~~~~ 250 We release NeMo containers alongside NeMo releases. For example, NeMo ``r1.15.0`` comes with container ``nemo:22.12``, you may find more details about released containers in `releases page <https://github.com/NVIDIA/NeMo/releases>`_. 251 252 To use built container, please run 253 254 .. code-block:: bash 255 256 docker pull nvcr.io/nvidia/nemo:22.12 257 258 To build a nemo container with Dockerfile from a branch, please run 259 260 .. code-block:: bash 261 262 DOCKER_BUILDKIT=1 docker build -f Dockerfile -t nemo:latest . 263 264 265 If you chose to work with main branch, we recommend using NVIDIA's PyTorch container version 23.01-py3 and then installing from GitHub. 266 267 .. 
code-block:: bash 268 269 docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 270 -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 271 stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:23.01-py3 272 273 Examples 274 -------- 275 276 Many examples can be found under `"Examples" <https://github.com/NVIDIA/NeMo/tree/stable/examples>`_ folder. 277 278 279 Contributing 280 ------------ 281 282 We welcome community contributions! Please refer to the `CONTRIBUTING.md <https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md>`_ CONTRIBUTING.md for the process. 283 284 Publications 285 ------------ 286 287 We provide an ever growing list of publications that utilize the NeMo framework. Please refer to `PUBLICATIONS.md <https://github.com/NVIDIA/NeMo/tree/stable/PUBLICATIONS.md>`_. We welcome the addition of your own articles to this list ! 288 289 License 290 ------- 291 NeMo is under `Apache 2.0 license <https://github.com/NVIDIA/NeMo/blob/stable/LICENSE>`_. 292 [end of README.rst] [start of nemo/collections/tts/modules/spectrogram_enhancer.py] 1 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 # MIT License 16 # 17 # Copyright (c) 2020 Phil Wang 18 # 19 # Permission is hereby granted, free of charge, to any person obtaining a copy 20 # of this software and associated documentation files (the "Software"), to deal 21 # in the Software without restriction, including without limitation the rights 22 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 23 # copies of the Software, and to permit persons to whom the Software is 24 # furnished to do so, subject to the following conditions: 25 # 26 # The above copyright notice and this permission notice shall be included in all 27 # copies or substantial portions of the Software. 28 # 29 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 30 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 31 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 32 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 33 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 34 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 # SOFTWARE. 
36 37 # The following is largely based on code from https://github.com/lucidrains/stylegan2-pytorch 38 39 import math 40 from functools import partial 41 from math import log2 42 from typing import List 43 44 import torch 45 import torch.nn.functional as F 46 from einops import rearrange 47 from kornia.filters import filter2d 48 49 from nemo.collections.tts.helpers.helpers import mask_sequence_tensor 50 51 52 class Blur(torch.nn.Module): 53 def __init__(self): 54 super().__init__() 55 f = torch.Tensor([1, 2, 1]) 56 self.register_buffer("f", f) 57 58 def forward(self, x): 59 f = self.f 60 f = f[None, None, :] * f[None, :, None] 61 return filter2d(x, f, normalized=True) 62 63 64 class EqualLinear(torch.nn.Module): 65 def __init__(self, in_dim, out_dim, lr_mul=1, bias=True): 66 super().__init__() 67 self.weight = torch.nn.Parameter(torch.randn(out_dim, in_dim)) 68 if bias: 69 self.bias = torch.nn.Parameter(torch.zeros(out_dim)) 70 71 self.lr_mul = lr_mul 72 73 def forward(self, input): 74 return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul) 75 76 77 class StyleMapping(torch.nn.Module): 78 def __init__(self, emb, depth, lr_mul=0.1): 79 super().__init__() 80 81 layers = [] 82 for _ in range(depth): 83 layers.extend([EqualLinear(emb, emb, lr_mul), torch.nn.LeakyReLU(0.2, inplace=True)]) 84 85 self.net = torch.nn.Sequential(*layers) 86 87 def forward(self, x): 88 x = F.normalize(x, dim=1) 89 return self.net(x) 90 91 92 class RGBBlock(torch.nn.Module): 93 def __init__(self, latent_dim, input_channel, upsample, channels=3): 94 super().__init__() 95 self.input_channel = input_channel 96 self.to_style = torch.nn.Linear(latent_dim, input_channel) 97 98 out_filters = channels 99 self.conv = Conv2DModulated(input_channel, out_filters, 1, demod=False) 100 101 self.upsample = ( 102 torch.nn.Sequential(torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), Blur(),) 103 if upsample 104 else None 105 ) 106 107 def forward(self, x, 
prev_rgb, istyle): 108 style = self.to_style(istyle) 109 x = self.conv(x, style) 110 111 if prev_rgb is not None: 112 x = x + prev_rgb 113 114 if self.upsample is not None: 115 x = self.upsample(x) 116 117 return x 118 119 120 class Conv2DModulated(torch.nn.Module): 121 """ 122 Modulated convolution. 123 For details refer to [1] 124 [1] Karras et. al. - Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958) 125 """ 126 127 def __init__( 128 self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps=1e-8, **kwargs, 129 ): 130 super().__init__() 131 self.filters = out_chan 132 self.demod = demod 133 self.kernel = kernel 134 self.stride = stride 135 self.dilation = dilation 136 self.weight = torch.nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel))) 137 self.eps = eps 138 torch.nn.init.kaiming_normal_(self.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 139 140 def _get_same_padding(self, size, kernel, dilation, stride): 141 return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2 142 143 def forward(self, x, y): 144 b, c, h, w = x.shape 145 146 w1 = y[:, None, :, None, None] 147 w2 = self.weight[None, :, :, :, :] 148 weights = w2 * (w1 + 1) 149 150 if self.demod: 151 d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) 152 weights = weights * d 153 154 x = x.reshape(1, -1, h, w) 155 156 _, _, *ws = weights.shape 157 weights = weights.reshape(b * self.filters, *ws) 158 159 padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride) 160 x = F.conv2d(x, weights, padding=padding, groups=b) 161 162 x = x.reshape(-1, self.filters, h, w) 163 return x 164 165 166 class GeneratorBlock(torch.nn.Module): 167 def __init__( 168 self, latent_dim, input_channels, filters, upsample=True, upsample_rgb=True, channels=1, 169 ): 170 super().__init__() 171 self.upsample = torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) if upsample else None 172 173 
self.to_style1 = torch.nn.Linear(latent_dim, input_channels) 174 self.to_noise1 = torch.nn.Linear(1, filters) 175 self.conv1 = Conv2DModulated(input_channels, filters, 3) 176 177 self.to_style2 = torch.nn.Linear(latent_dim, filters) 178 self.to_noise2 = torch.nn.Linear(1, filters) 179 self.conv2 = Conv2DModulated(filters, filters, 3) 180 181 self.activation = torch.nn.LeakyReLU(0.2, inplace=True) 182 self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, channels) 183 184 def forward(self, x, prev_rgb, istyle, inoise): 185 if self.upsample is not None: 186 x = self.upsample(x) 187 188 inoise = inoise[:, : x.shape[2], : x.shape[3], :] 189 noise1 = self.to_noise1(inoise).permute((0, 3, 1, 2)) 190 noise2 = self.to_noise2(inoise).permute((0, 3, 1, 2)) 191 192 style1 = self.to_style1(istyle) 193 x = self.conv1(x, style1) 194 x = self.activation(x + noise1) 195 196 style2 = self.to_style2(istyle) 197 x = self.conv2(x, style2) 198 x = self.activation(x + noise2) 199 200 rgb = self.to_rgb(x, prev_rgb, istyle) 201 return x, rgb 202 203 204 class DiscriminatorBlock(torch.nn.Module): 205 def __init__(self, input_channels, filters, downsample=True): 206 super().__init__() 207 self.conv_res = torch.nn.Conv2d(input_channels, filters, 1, stride=(2 if downsample else 1)) 208 209 self.net = torch.nn.Sequential( 210 torch.nn.Conv2d(input_channels, filters, 3, padding=1), 211 torch.nn.LeakyReLU(0.2, inplace=True), 212 torch.nn.Conv2d(filters, filters, 3, padding=1), 213 torch.nn.LeakyReLU(0.2, inplace=True), 214 ) 215 216 self.downsample = ( 217 torch.nn.Sequential(Blur(), torch.nn.Conv2d(filters, filters, 3, padding=1, stride=2)) 218 if downsample 219 else None 220 ) 221 222 def forward(self, x): 223 res = self.conv_res(x) 224 x = self.net(x) 225 if self.downsample is not None: 226 x = self.downsample(x) 227 x = (x + res) * (1 / math.sqrt(2)) 228 return x 229 230 231 class Generator(torch.nn.Module): 232 def __init__( 233 self, 234 n_bands, 235 latent_dim, 236 style_depth, 237 
network_capacity=16, 238 channels=1, 239 fmap_max=512, 240 max_spectrogram_length=2000, 241 ): 242 super().__init__() 243 self.image_size = n_bands 244 self.latent_dim = latent_dim 245 self.num_layers = int(log2(n_bands) - 1) 246 self.style_depth = style_depth 247 248 self.style_mapping = StyleMapping(self.latent_dim, self.style_depth, lr_mul=0.1) 249 250 filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1] 251 252 set_fmap_max = partial(min, fmap_max) 253 filters = list(map(set_fmap_max, filters)) 254 init_channels = filters[0] 255 filters = [init_channels, *filters] 256 257 in_out_pairs = zip(filters[:-1], filters[1:]) 258 259 self.initial_conv = torch.nn.Conv2d(filters[0], filters[0], 3, padding=1) 260 self.blocks = torch.nn.ModuleList([]) 261 262 for ind, (in_chan, out_chan) in enumerate(in_out_pairs): 263 not_first = ind != 0 264 not_last = ind != (self.num_layers - 1) 265 266 block = GeneratorBlock( 267 latent_dim, in_chan, out_chan, upsample=not_first, upsample_rgb=not_last, channels=channels, 268 ) 269 self.blocks.append(block) 270 271 for m in self.modules(): 272 if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)): 273 torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 274 for block in self.blocks: 275 torch.nn.init.zeros_(block.to_noise1.weight) 276 torch.nn.init.zeros_(block.to_noise1.bias) 277 torch.nn.init.zeros_(block.to_noise2.weight) 278 torch.nn.init.zeros_(block.to_noise2.bias) 279 280 initial_block_size = n_bands // self.upsample_factor, math.ceil(max_spectrogram_length / self.upsample_factor) 281 self.initial_block = torch.nn.Parameter( 282 torch.randn((1, init_channels, *initial_block_size)), requires_grad=False 283 ) 284 285 def add_scaled_condition(self, target: torch.Tensor, condition: torch.Tensor, condition_lengths: torch.Tensor): 286 *_, target_height, _ = target.shape 287 *_, height, _ = condition.shape 288 289 scale = height // target_height 290 291 # scale 
appropriately 292 condition = F.interpolate(condition, size=target.shape[-2:], mode="bilinear") 293 294 # add and mask 295 result = (target + condition) / 2 296 result = mask_sequence_tensor(result, (condition_lengths / scale).ceil().long()) 297 298 return result 299 300 @property 301 def upsample_factor(self): 302 return 2 ** sum(1 for block in self.blocks if block.upsample) 303 304 def forward(self, condition: torch.Tensor, lengths: torch.Tensor, ws: List[torch.Tensor], noise: torch.Tensor): 305 batch_size, _, _, max_length = condition.shape 306 307 x = self.initial_block.expand(batch_size, -1, -1, -1) 308 x = x[:, :, :, : max_length // self.upsample_factor] 309 310 rgb = None 311 x = self.initial_conv(x) 312 313 for style, block in zip(ws, self.blocks): 314 x, rgb = block(x, rgb, style, noise) 315 316 x = self.add_scaled_condition(x, condition, lengths) 317 rgb = self.add_scaled_condition(rgb, condition, lengths) 318 319 return rgb 320 321 322 class Discriminator(torch.nn.Module): 323 def __init__( 324 self, n_bands, network_capacity=16, channels=1, fmap_max=512, 325 ): 326 super().__init__() 327 num_layers = int(log2(n_bands) - 1) 328 num_init_filters = channels 329 330 blocks = [] 331 filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)] 332 333 set_fmap_max = partial(min, fmap_max) 334 filters = list(map(set_fmap_max, filters)) 335 chan_in_out = list(zip(filters[:-1], filters[1:])) 336 337 blocks = [] 338 339 for ind, (in_chan, out_chan) in enumerate(chan_in_out): 340 is_not_last = ind != (len(chan_in_out) - 1) 341 342 block = DiscriminatorBlock(in_chan, out_chan, downsample=is_not_last) 343 blocks.append(block) 344 345 self.blocks = torch.nn.ModuleList(blocks) 346 347 channel_last = filters[-1] 348 latent_dim = channel_last 349 350 self.final_conv = torch.nn.Conv2d(channel_last, channel_last, 3, padding=1) 351 self.to_logit = torch.nn.Linear(latent_dim, 1) 352 353 for m in self.modules(): 354 if isinstance(m, 
(torch.nn.Conv2d, torch.nn.Linear)): 355 torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 356 357 def forward(self, x, condition: torch.Tensor, lengths: torch.Tensor): 358 for block in self.blocks: 359 x = block(x) 360 scale = condition.shape[-1] // x.shape[-1] 361 x = mask_sequence_tensor(x, (lengths / scale).ceil().long()) 362 363 x = self.final_conv(x) 364 365 scale = condition.shape[-1] // x.shape[-1] 366 x = mask_sequence_tensor(x, (lengths / scale).ceil().long()) 367 368 x = x.mean(axis=-2) 369 x = (x / rearrange(lengths / scale, "b -> b 1 1")).sum(axis=-1) 370 x = self.to_logit(x) 371 return x.squeeze() 372 [end of nemo/collections/tts/modules/spectrogram_enhancer.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
66aeb4c36dd86a777cc47e9878e701bd8029b654
Spectrogram Enhancer doesn't generalize to spectrogram lengths unseen during training **Describe the bug** If one trains TTS spectrogram enhancer (#5565) on short spectrograms, at inference time it doesn't generalize to longer ones. A patch in the beginning gets enhanced but further frames do not. Example (before, after): ![before](https://user-images.githubusercontent.com/8864149/218520151-230501b1-4e9d-4307-b51e-526440b915c8.png) ![after](https://user-images.githubusercontent.com/8864149/218520199-22187a4b-9cf8-4ac9-b3ce-07c320d3d3b6.png) **Steps/Code to reproduce bug** 1. Train a spectrogram enhancer 2. Apply it to a spectrogram that's longer than anything from the training set 3. Only a patch in the beginning gets enhanced **Expected behavior** The whole spectrogram should have got additional details, not just the first patch
A temporary fix: given a trained model, clone first patch of the initial tensor length-wise: ``` max_init_length = enhancer.generator.initial_block.shape[-1] m = 6 for i in range(1, max_init_length // m): enhancer.generator.initial_block.data[:,:,:,i*m:(i+1)*m] = enhancer.generator.initial_block.data[:,:,:,0:m] ``` Visually fix works, not sure what about downstream tasks ![after_temporary_fix](https://user-images.githubusercontent.com/8864149/218524157-9283395d-f196-41aa-81fe-f8868c2acb67.png) As-is most likely breaks #5659 (setup with enhancer)
2023-02-23T22:43:15Z
<patch> diff --git a/nemo/collections/tts/modules/spectrogram_enhancer.py b/nemo/collections/tts/modules/spectrogram_enhancer.py --- a/nemo/collections/tts/modules/spectrogram_enhancer.py +++ b/nemo/collections/tts/modules/spectrogram_enhancer.py @@ -230,14 +230,7 @@ def forward(self, x): class Generator(torch.nn.Module): def __init__( - self, - n_bands, - latent_dim, - style_depth, - network_capacity=16, - channels=1, - fmap_max=512, - max_spectrogram_length=2000, + self, n_bands, latent_dim, style_depth, network_capacity=16, channels=1, fmap_max=512, ): super().__init__() self.image_size = n_bands @@ -277,7 +270,7 @@ def __init__( torch.nn.init.zeros_(block.to_noise2.weight) torch.nn.init.zeros_(block.to_noise2.bias) - initial_block_size = n_bands // self.upsample_factor, math.ceil(max_spectrogram_length / self.upsample_factor) + initial_block_size = n_bands // self.upsample_factor, 1 self.initial_block = torch.nn.Parameter( torch.randn((1, init_channels, *initial_block_size)), requires_grad=False ) @@ -304,8 +297,7 @@ def upsample_factor(self): def forward(self, condition: torch.Tensor, lengths: torch.Tensor, ws: List[torch.Tensor], noise: torch.Tensor): batch_size, _, _, max_length = condition.shape - x = self.initial_block.expand(batch_size, -1, -1, -1) - x = x[:, :, :, : max_length // self.upsample_factor] + x = self.initial_block.expand(batch_size, -1, -1, max_length // self.upsample_factor) rgb = None x = self.initial_conv(x) </patch>
diff --git a/tests/collections/tts/test_spectrogram_enhancer.py b/tests/collections/tts/test_spectrogram_enhancer.py --- a/tests/collections/tts/test_spectrogram_enhancer.py +++ b/tests/collections/tts/test_spectrogram_enhancer.py @@ -37,7 +37,6 @@ def enhancer_config(): "network_capacity": network_capacity, "mixed_prob": 0.9, "fmap_max": fmap_max, - "max_spectrogram_length": 2000, "generator": { "_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Generator", "n_bands": n_bands, @@ -45,7 +44,6 @@ def enhancer_config(): "network_capacity": network_capacity, "style_depth": style_depth, "fmap_max": fmap_max, - "max_spectrogram_length": 2000, }, "discriminator": { "_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Discriminator",
1.0
NVIDIA__NeMo-3159
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Punctuation data set uses too much memory **Describe the bug** Punctuation datasets cannot be constructed with more than a few million examples (depending on the max length). Even these small datasets can consume a huge amount of memory while preprocessing (100's of GB) and while training (10's of GB per process). This issue was mentioned in passing in https://github.com/NVIDIA/NeMo/issues/2569 though it was not the main issue discussed there and thus not fixed. Assuming it is an acceptable idea to load the entire dataset into memory in the first place, these are the points where there seems to be an excessive use of memory. The function with issues (`get_features`): https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py#L34 The main issues: 1. Why save `all_segment_ids`? It appears there is nowhere this is set to anything but a constant 0. `__getitem__` can deal with it. 2. Why pad to the max length? `collate_fn` can deal with that, and could do so much more efficiently (by only padding to the max length of the batch). 3. Loss mask and input mask can be generated by `__getitem__` or `collate_fn` 4. Subtokens mask could be generated later as well, albeit not trivially. **Expected behavior** Should be able to use a dataset with 10's of millions of lines. </issue> <code> [start of README.rst] 1 2 |status| |documentation| |license| |lgtm_grade| |lgtm_alerts| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 .. 
|documentation| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 9 :alt: Documentation 10 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 11 12 .. |license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 13 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 14 :alt: NeMo core license and license for collections in this repo 15 16 .. |lgtm_grade| image:: https://img.shields.io/lgtm/grade/python/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 17 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/context:python 18 :alt: Language grade: Python 19 20 .. |lgtm_alerts| image:: https://img.shields.io/lgtm/alerts/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 21 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/alerts/ 22 :alt: Total alerts 23 24 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 25 :target: https://github.com/psf/black 26 :alt: Code style: black 27 28 .. _main-readme: 29 30 **NVIDIA NeMo** 31 =============== 32 33 Introduction 34 ------------ 35 36 NVIDIA NeMo is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), natural language processing (NLP), and text-to-speech synthesis (TTS). 37 The primary objective of NeMo is to help researchers from industry and academia to reuse prior work (code and pretrained models and make it easier to create new `conversational AI models <https://developer.nvidia.com/conversational-ai#started>`_. 38 39 `Pre-trained NeMo models. <https://catalog.ngc.nvidia.com/models?query=nemo&orderBy=weightPopularDESC>`_ 40 41 `Introductory video. <https://www.youtube.com/embed/wBgpMf_KQVw>`_ 42 43 Key Features 44 ------------ 45 46 * Speech processing 47 * `Automatic Speech Recognition (ASR) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html>`_ 48 * Supported models: Jasper, QuartzNet, CitriNet, Conformer-CTC, Conformer-Transducer, ContextNet, ... 
49 * Supports CTC and Transducer/RNNT losses/decoders 50 * Beam Search decoding 51 * `Language Modelling for ASR <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html>`_: N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer 52 * `Speech Classification and Speech Command Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html>`_: MatchboxNet (Command Recognition) 53 * `Voice activity Detection (VAD) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad>`_: MarbleNet 54 * `Speaker Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html>`_: SpeakerNet, ECAPA_TDNN 55 * `Speaker Diarization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html>`_: SpeakerNet, ECAPA_TDNN 56 * `Pretrained models on different languages. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_: English, Spanish, German, Russian, Chinese, French, Italian, Polish, ... 57 * `NGC collection of pre-trained speech processing models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_ 58 * Natural Language Processing 59 * `Compatible with Hugging Face Transformers and NVIDIA Megatron <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/megatron_finetuning.html>`_ 60 * `Neural Machine Translation (NMT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/machine_translation.html>`_ 61 * `Punctuation and Capitalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html>`_ 62 * `Token classification (named entity recognition) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/token_classification.html>`_ 63 * `Text classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_classification.html>`_ 64 * `Joint Intent and Slot Classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/joint_intent_slot.html>`_ 65 * `BERT pre-training <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/bert_pretraining.html>`_ 66 * `Question answering <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/question_answering.html>`_ 67 * `GLUE benchmark <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/glue_benchmark.html>`_ 68 * `Information retrieval <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/information_retrieval.html>`_ 69 * `Entity Linking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/entity_linking.html>`_ 70 * `Dialogue State Tracking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/sgd_qa.html>`_ 71 * `Neural Duplex Text Normalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization.html>`_ 72 * `NGC collection of pre-trained NLP models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_nlp>`_ 73 * `Speech synthesis (TTS) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#>`_ 74 * Spectrogram generation: Tacotron2, GlowTTS, FastSpeech2, FastPitch, FastSpeech2 75 * Vocoders: WaveGlow, SqueezeWave, UniGlow, MelGAN, HiFiGAN 76 * End-to-end speech generation: FastPitch_HifiGan_E2E, FastSpeech2_HifiGan_E2E 77 * `NGC collection of pre-trained TTS models. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_tts>`_ 78 * `Tools <https://github.com/NVIDIA/NeMo/tree/main/tools>`_ 79 * `Text Processing (text normalization and inverse text normalization) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/text_processing_deployment.html>`_ 80 * `CTC-Segmentation tool <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/ctc_segmentation.html>`_ 81 * `Speech Data Explorer <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/speech_data_explorer.html>`_: a dash-based tool for interactive exploration of ASR/TTS datasets 82 83 84 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 85 86 Requirements 87 ------------ 88 89 1) Python 3.6, 3.7 or 3.8 90 2) Pytorch 1.10.0 or above 91 3) NVIDIA GPU for training 92 93 Documentation 94 ------------- 95 96 .. |main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 97 :alt: Documentation Status 98 :scale: 100% 99 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 100 101 .. 
|stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 102 :alt: Documentation Status 103 :scale: 100% 104 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 105 106 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 107 | Version | Status | Description | 108 +=========+=============+==========================================================================================================================================+ 109 | Latest | |main| | `Documentation of the latest (i.e. main) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/>`_ | 110 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 111 | Stable | |stable| | `Documentation of the stable (i.e. most recent release) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/>`_ | 112 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 113 114 Tutorials 115 --------- 116 A great way to start with NeMo is by checking `one of our tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_. 117 118 Getting help with NeMo 119 ---------------------- 120 FAQ can be found on NeMo's `Discussions board <https://github.com/NVIDIA/NeMo/discussions>`_. You are welcome to ask questions or start discussions there. 121 122 123 Installation 124 ------------ 125 126 Pip 127 ~~~ 128 Use this installation mode if you want the latest released version. 129 130 .. 
code-block:: bash 131 132 apt-get update && apt-get install -y libsndfile1 ffmpeg 133 pip install Cython 134 pip install nemo_toolkit['all'] 135 136 Pip from source 137 ~~~~~~~~~~~~~~~ 138 Use this installation mode if you want the a version from particular GitHub branch (e.g main). 139 140 .. code-block:: bash 141 142 apt-get update && apt-get install -y libsndfile1 ffmpeg 143 pip install Cython 144 python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all] 145 146 147 From source 148 ~~~~~~~~~~~ 149 Use this installation mode if you are contributing to NeMo. 150 151 .. code-block:: bash 152 153 apt-get update && apt-get install -y libsndfile1 ffmpeg 154 git clone https://github.com/NVIDIA/NeMo 155 cd NeMo 156 ./reinstall.sh 157 158 RNNT 159 ~~~~ 160 Note that RNNT requires numba to be installed from conda. 161 162 .. code-block:: bash 163 164 conda remove numba 165 pip uninstall numba 166 conda install -c numba numba 167 168 Megatron GPT 169 ~~~~~~~~~~~~ 170 Megatron GPT training requires NVIDIA Apex to be installed. 171 172 .. code-block:: bash 173 174 git clone https://github.com/NVIDIA/apex 175 cd apex 176 git checkout 14ccf5986401104121d0ef286a29386904af3bb7 177 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ 178 179 Docker containers: 180 ~~~~~~~~~~~~~~~~~~ 181 To build a nemo container with Dockerfile from a branch, please run 182 183 .. code-block:: bash 184 DOCKER_BUILDKIT=1 docker build -f Dockerfile -t nemo:latest . 185 186 187 If you chose to work with main branch, we recommend using NVIDIA's PyTorch container version 21.11-py3 and then installing from GitHub. 188 189 .. 
code-block:: bash 190 191 docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 192 -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 193 stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:21.11-py3 194 195 Examples 196 -------- 197 198 Many examples can be found under `"Examples" <https://github.com/NVIDIA/NeMo/tree/stable/examples>`_ folder. 199 200 201 Contributing 202 ------------ 203 204 We welcome community contributions! Please refer to the `CONTRIBUTING.md <https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md>`_ CONTRIBUTING.md for the process. 205 206 Publications 207 ------------ 208 209 We provide an ever growing list of publications that utilize the NeMo framework. Please refer to `PUBLICATIONS.md <https://github.com/NVIDIA/NeMo/blob/main/PUBLICATIONS.md>`_. We welcome the addition of your own articles to this list ! 210 211 Citation 212 -------- 213 214 .. code-block:: bash 215 216 @article{kuchaiev2019nemo, 217 title={Nemo: a toolkit for building ai applications using neural modules}, 218 author={Kuchaiev, Oleksii and Li, Jason and Nguyen, Huyen and Hrinchuk, Oleksii and Leary, Ryan and Ginsburg, Boris and Kriman, Samuel and Beliaev, Stanislav and Lavrukhin, Vitaly and Cook, Jack and others}, 219 journal={arXiv preprint arXiv:1909.09577}, 220 year={2019} 221 } 222 223 License 224 ------- 225 NeMo is under `Apache 2.0 license <https://github.com/NVIDIA/NeMo/blob/stable/LICENSE>`_. 226 [end of README.rst] [start of /dev/null] 1 [end of /dev/null] [start of examples/nlp/token_classification/punctuate_capitalize_infer.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import argparse 16 import json 17 from pathlib import Path 18 19 import torch.cuda 20 21 from nemo.collections.nlp.models import PunctuationCapitalizationModel 22 23 24 """ 25 This script is for restoring punctuation and capitalization. 26 27 Usage example: 28 29 python punctuate_capitalize.py \ 30 --input_manifest <PATH_TO_INPUT_MANIFEST> \ 31 --output_manifest <PATH_TO_OUTPUT_MANIFEST> 32 33 <PATH_TO_INPUT_MANIFEST> is a path to NeMo ASR manifest. Usually it is an output of 34 NeMo/examples/asr/transcribe_speech.py but can be a manifest with 'text' key. Alternatively you can use 35 --input_text parameter for passing text for inference. 36 <PATH_TO_OUTPUT_MANIFEST> is a path to NeMo ASR manifest into which script output will be written. Alternatively 37 you can use parameter --output_text. 38 39 For more details on this script usage look in argparse help. 40 """ 41 42 43 def get_args(): 44 default_model_parameter = "pretrained_name" 45 default_model = "punctuation_en_bert" 46 parser = argparse.ArgumentParser( 47 formatter_class=argparse.ArgumentDefaultsHelpFormatter, 48 description="The script is for restoring punctuation and capitalization in text. Long strings are split into " 49 "segments of length `--max_seq_length`. `--max_seq_length` is the length which includes [CLS] and [SEP] " 50 "tokens. Parameter `--step` controls segments overlapping. `--step` is a distance between beginnings of " 51 "consequent segments. 
Model outputs for tokens near the borders of tensors are less accurate and can be " 52 "discarded before final predictions computation. Parameter `--margin` is number of discarded outputs near " 53 "segments borders. Probabilities of tokens in overlapping parts of segments multiplied before selecting the " 54 "best prediction. Default values of parameters `--max_seq_length`, `--step`, and `--margin` are optimal for " 55 "IWSLT 2019 test dataset.", 56 ) 57 input_ = parser.add_mutually_exclusive_group(required=True) 58 input_.add_argument( 59 "--input_manifest", 60 "-m", 61 type=Path, 62 help="Path to the file with NeMo manifest which needs punctuation and capitalization. If the first element " 63 "of manifest contains key 'pred_text', 'pred_text' values are passed for tokenization. Otherwise 'text' " 64 "values are passed for punctuation and capitalization. Exactly one parameter of `--input_manifest` and " 65 "`--input_text` should be provided.", 66 ) 67 input_.add_argument( 68 "--input_text", 69 "-t", 70 type=Path, 71 help="Path to file with text which needs punctuation and capitalization. Exactly one parameter of " 72 "`--input_manifest` and `--input_text` should be provided.", 73 ) 74 output = parser.add_mutually_exclusive_group(required=True) 75 output.add_argument( 76 "--output_manifest", 77 "-M", 78 type=Path, 79 help="Path to output NeMo manifest. Text with restored punctuation and capitalization will be saved in " 80 "'pred_text' elements if 'pred_text' key is present in the input manifest. Otherwise text with restored " 81 "punctuation and capitalization will be saved in 'text' elements. Exactly one parameter of `--output_manifest` " 82 "and `--output_text` should be provided.", 83 ) 84 output.add_argument( 85 "--output_text", 86 "-T", 87 type=Path, 88 help="Path to file with text with restored punctuation and capitalization. 
Exactly one parameter of " 89 "`--output_manifest` and `--output_text` should be provided.", 90 ) 91 model = parser.add_mutually_exclusive_group(required=False) 92 model.add_argument( 93 "--pretrained_name", 94 "-p", 95 help=f"The name of NGC pretrained model. No more than one of parameters `--pretrained_name`, `--model_path`" 96 f"should be provided. If neither of parameters `--pretrained_name` and `--model_path` are provided, then the " 97 f"script is run with `--{default_model_parameter}={default_model}`.", 98 choices=[m.pretrained_model_name for m in PunctuationCapitalizationModel.list_available_models()], 99 ) 100 model.add_argument( 101 "--model_path", 102 "-P", 103 type=Path, 104 help=f"Path to .nemo checkpoint of punctuation and capitalization model. No more than one of parameters " 105 f"`--pretrained_name` and `--model_path` should be provided. If neither of parameters `--pretrained_name` and " 106 f"`--model_path` are provided, then the script is run with `--{default_model_parameter}={default_model}`.", 107 ) 108 parser.add_argument( 109 "--max_seq_length", 110 "-L", 111 type=int, 112 default=64, 113 help="Length of segments into which queries are split. `--max_seq_length` includes [CLS] and [SEP] tokens.", 114 ) 115 parser.add_argument( 116 "--step", 117 "-s", 118 type=int, 119 default=8, 120 help="Relative shift of consequent segments into which long queries are split. Long queries are split into " 121 "segments which can overlap. Parameter `step` controls such overlapping. Imagine that queries are " 122 "tokenized into characters, `max_seq_length=5`, and `step=2`. In such a case query 'hello' is tokenized " 123 "into segments `[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]`.", 124 ) 125 parser.add_argument( 126 "--margin", 127 "-g", 128 type=int, 129 default=16, 130 help="A number of subtokens in the beginning and the end of segments which output probabilities are not used " 131 "for prediction computation. 
The first segment does not have left margin and the last segment does not have " 132 "right margin. For example, if input sequence is tokenized into characters, `max_seq_length=5`, `step=1`, " 133 "and `margin=1`, then query 'hello' will be tokenized into segments `[['[CLS]', 'h', 'e', 'l', '[SEP]'], " 134 "['[CLS]', 'e', 'l', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]`. These segments are passed to the " 135 "model. Before final predictions computation, margins are removed. In the next list, subtokens which logits " 136 "are not used for final predictions computation are marked with asterisk: `[['[CLS]'*, 'h', 'e', 'l'*, " 137 "'[SEP]'*], ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]`.", 138 ) 139 parser.add_argument( 140 "--batch_size", "-b", type=int, default=128, help="Number of segments which are processed simultaneously.", 141 ) 142 parser.add_argument( 143 "--save_labels_instead_of_text", 144 "-B", 145 action="store_true", 146 help="If this option is set, then punctuation and capitalization labels are saved instead text with restored " 147 "punctuation and capitalization. Labels are saved in format described here " 148 "https://docs.nvidia.com/deeplearning/nemo/" 149 "user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format", 150 ) 151 parser.add_argument( 152 "--device", 153 "-d", 154 choices=['cpu', 'cuda'], 155 help="Which device to use. If device is not set and CUDA is available, then GPU will be used. 
If device is " 156 "not set and CUDA is not available, then CPU is used.", 157 ) 158 args = parser.parse_args() 159 if args.input_manifest is None and args.output_manifest is not None: 160 parser.error("--output_manifest requires --input_manifest") 161 if args.pretrained_name is None and args.model_path is None: 162 setattr(args, default_model_parameter, default_model) 163 for name in ["input_manifest", "input_text", "output_manifest", "output_text", "model_path"]: 164 if getattr(args, name) is not None: 165 setattr(args, name, getattr(args, name).expanduser()) 166 return args 167 168 169 def load_manifest(manifest: Path): 170 result = [] 171 with manifest.open() as f: 172 for i, line in enumerate(f): 173 data = json.loads(line) 174 result.append(data) 175 return result 176 177 178 def main(): 179 args = get_args() 180 if args.pretrained_name is None: 181 model = PunctuationCapitalizationModel.restore_from(args.model_path) 182 else: 183 model = PunctuationCapitalizationModel.from_pretrained(args.pretrained_name) 184 if args.device is None: 185 if torch.cuda.is_available(): 186 model = model.cuda() 187 else: 188 model = model.cpu() 189 else: 190 model = model.to(args.device) 191 model = model.cpu() 192 if args.input_manifest is None: 193 texts = [] 194 with args.input_text.open() as f: 195 for line in f: 196 texts.append(line.strip()) 197 else: 198 manifest = load_manifest(args.input_manifest) 199 text_key = "pred_text" if "pred_text" in manifest[0] else "text" 200 texts = [] 201 for item in manifest: 202 texts.append(item[text_key]) 203 processed_texts = model.add_punctuation_capitalization( 204 texts, 205 batch_size=args.batch_size, 206 max_seq_length=args.max_seq_length, 207 step=args.step, 208 margin=args.margin, 209 return_labels=args.save_labels_instead_of_text, 210 ) 211 if args.output_manifest is None: 212 args.output_text.parent.mkdir(exist_ok=True, parents=True) 213 with args.output_text.open('w') as f: 214 for t in processed_texts: 215 f.write(t + '\n') 
216 else: 217 args.output_manifest.parent.mkdir(exist_ok=True, parents=True) 218 with args.output_manifest.open('w') as f: 219 for item, t in zip(manifest, processed_texts): 220 item[text_key] = t 221 f.write(json.dumps(item) + '\n') 222 223 224 if __name__ == "__main__": 225 main() 226 [end of examples/nlp/token_classification/punctuate_capitalize_infer.py] [start of examples/nlp/token_classification/punctuation_capitalization_evaluate.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 17 import pytorch_lightning as pl 18 from omegaconf import DictConfig 19 20 from nemo.collections.nlp.models import PunctuationCapitalizationModel 21 from nemo.core.config import hydra_runner 22 from nemo.utils import logging 23 from nemo.utils.exp_manager import exp_manager 24 25 26 """ 27 This script shows how to perform evaluation and runs inference of a few examples. 28 29 More details on the task and data format could be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb 30 31 *** Setting the configs *** 32 33 This script uses the `/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml` config file 34 by default. You may update the config file from the file directly. 35 The other option is to set another config file via command line arguments by `--config-name=CONFIG_FILE_PATH'. 

For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb


*** Model Evaluation ***

python punctuation_capitalization_evaluate.py \
    model.dataset.data_dir=<PATH_TO_DATA_DIR> \
    pretrained_model=punctuation_en_distilbert

<PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config)
pretrained_model - pretrained PunctuationCapitalizationModel model from list_available_models() or
    path to a .nemo file, for example: punctuation_en_bert or your_model.nemo

"""


@hydra_runner(config_path="conf", config_name="punctuation_capitalization_config")
def main(cfg: DictConfig) -> None:
    """Evaluate a pre-trained punctuation/capitalization model on a test set, then run a demo inference.

    The Hydra config must provide ``model.test_ds`` and a non-empty ``pretrained_model``
    (either a local ``.nemo`` path or a name from
    ``PunctuationCapitalizationModel.list_available_models()``); otherwise a ``ValueError``
    is raised. Evaluation is skipped (with an error log) when ``model.dataset.data_dir``
    is missing or does not exist; the demo inference on a few hard-coded queries runs
    regardless.
    """
    logging.info(
        'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \
        no DDP to obtain accurate results'
    )

    if not hasattr(cfg.model, 'test_ds'):
        raise ValueError(f'model.test_ds was not found in the config, skipping evaluation')
    else:
        # Use at most one GPU: the log message above explains why DDP/multi-GPU is avoided here.
        gpu = 1 if cfg.trainer.gpus != 0 else 0

    # NOTE(review): `checkpoint_callback` was deprecated/removed in newer PyTorch Lightning
    # releases (replaced by `enable_checkpointing`) — confirm against the pinned PL version.
    trainer = pl.Trainer(gpus=gpu, precision=cfg.trainer.precision, logger=False, checkpoint_callback=False,)
    exp_dir = exp_manager(trainer, cfg.exp_manager)

    if not cfg.pretrained_model:
        raise ValueError(
            'To run evaluation and inference script a pre-trained model or .nemo file must be provided.'
            f'Choose from {PunctuationCapitalizationModel.list_available_models()} or "pretrained_model"="your_model.nemo"'
        )

    # A local .nemo checkpoint takes precedence over a registered pretrained model name.
    if os.path.exists(cfg.pretrained_model):
        model = PunctuationCapitalizationModel.restore_from(cfg.pretrained_model)
    elif cfg.pretrained_model in PunctuationCapitalizationModel.get_available_model_names():
        model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model)
    else:
        raise ValueError(
            f'Provide path to the pre-trained .nemo file or choose from {PunctuationCapitalizationModel.list_available_models()}'
        )

    data_dir = cfg.model.dataset.get('data_dir', None)

    if data_dir is None:
        logging.error(
            'No dataset directory provided. Skipping evaluation. '
            'To run evaluation on a file, specify path to the directory that contains test_ds.text_file and test_ds.labels_file with "model.dataset.data_dir" argument.'
        )
    elif not os.path.exists(data_dir):
        logging.error(f'{data_dir} is not found, skipping evaluation on the test set.')
    else:
        model.update_data_dir(data_dir=data_dir)
        model._cfg.dataset = cfg.model.dataset

        # This re-check is redundant: a missing model.test_ds already raised a ValueError above.
        if not hasattr(cfg.model, 'test_ds'):
            logging.error(f'model.test_ds was not found in the config, skipping evaluation')
        elif model.prepare_test(trainer):
            model.setup_test_data(cfg.model.test_ds)
            trainer.test(model)
        else:
            logging.error('Skipping the evaluation. The trainer is not setup properly.')

    # run an inference on a few examples
    queries = [
        'we bought four shirts one pen and a mug from the nvidia gear store in santa clara',
        'what can i do for you today',
        'how are you',
    ]

    inference_results = model.add_punctuation_capitalization(queries, batch_size=len(queries), max_seq_length=512)

    for query, result in zip(queries, inference_results):
        logging.info(f'Query : {query}')
        logging.info(f'Result: {result.strip()}\n')

    logging.info(f'Results are saved at {exp_dir}')


if __name__ == '__main__':
    main()
[end of examples/nlp/token_classification/punctuation_capitalization_evaluate.py]
[start of examples/nlp/token_classification/punctuation_capitalization_train.py]
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf

from nemo.collections.nlp.models import PunctuationCapitalizationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager


"""
This script shows how to train a Punctuation and Capitalization Model.
More details on the task and data format could be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb

*** Setting the configs ***

The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
    model: All arguments that are related to the Model - language model, tokenizer, token classifier, optimizer,
        schedulers, and datasets/data loaders.
    trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
        precision level, etc.
This script uses the `/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml` config file
by default. You may update the config file from the file directly.
The other option is to set another config file via command line arguments by `--config-name=CONFIG_FILE_PATH'.

For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb

*** Model training ***

To run this script and train the model from scratch, use:
    python punctuation_and_capitalization_train.py \
        model.dataset.data_dir=<PATH_TO_DATA_DIR>

To use one of the pretrained versions of the model and finetune it, run:
    python punctuation_and_capitalization.py \
        pretrained_model=punctuation_en_bert \
        model.dataset.data_dir=<PATH_TO_DATA_DIR>

<PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config)
pretrained_model - pretrained PunctuationCapitalization model from list_available_models() or
    path to a .nemo file, for example: punctuation_en_bert or model.nemo

"""


@hydra_runner(config_path="conf", config_name="punctuation_capitalization_config")
def main(cfg: DictConfig) -> None:
    """Train or finetune a punctuation and capitalization model.

    When ``cfg.pretrained_model`` is empty, a fresh model is built from ``cfg.model``.
    Otherwise an existing model (local ``.nemo`` path or registered name) is restored
    and finetuned on ``cfg.model.dataset.data_dir``; in that branch a missing or
    non-existent data directory raises ``ValueError``.
    """
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))

    if not cfg.pretrained_model:
        logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
        model = PunctuationCapitalizationModel(cfg.model, trainer=trainer)
    else:
        # A local .nemo checkpoint takes precedence over a registered pretrained model name.
        if os.path.exists(cfg.pretrained_model):
            model = PunctuationCapitalizationModel.restore_from(cfg.pretrained_model)
        elif cfg.pretrained_model in PunctuationCapitalizationModel.get_available_model_names():
            model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model)
        else:
            raise ValueError(
                f'Provide path to the pre-trained .nemo file or choose from {PunctuationCapitalizationModel.list_available_models()}'
            )

        data_dir = cfg.model.dataset.get('data_dir', None)
        if data_dir:
            if not os.path.exists(data_dir):
                raise ValueError(f'{data_dir} is not found at')

            # we can also do finetuning of the pretrained model but we would need to update the data dir
            model.update_data_dir(data_dir)
            # setup train and validation Pytorch DataLoaders
            model.setup_training_data()
            model.setup_validation_data()
            logging.info(f'Using config file of the pretrained model')
        else:
            raise ValueError(
                'Specify a valid dataset directory that contains test_ds.text_file and test_ds.labels_file \
                with "model.dataset.data_dir" argument'
            )

    trainer.fit(model)


if __name__ == '__main__':
    main()
[end of examples/nlp/token_classification/punctuation_capitalization_train.py]
[start of nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = ['BertPunctuationCapitalizationDataset', 'BertPunctuationCapitalizationInferDataset']

import itertools
import os
import pickle
from typing import Dict, List, Optional, Tuple

import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence

from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, Index, LabelsType, MaskType, NeuralType
from nemo.core.neural_types.elements import BoolType
from nemo.utils import logging


def get_features(
    queries: List[str],
    max_seq_length: int,
    tokenizer: TokenizerSpec,
    punct_label_ids: dict = None,
    capit_label_ids: dict = None,
    pad_label: str = 'O',
    punct_labels_lines=None,
    capit_labels_lines=None,
    ignore_extra_tokens=False,
    ignore_start_end: Optional[bool] = False,
):
    """
    Processes the data and returns features.

    Args:
        queries: text sequences
        max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
        tokenizer: such as AutoTokenizer
        pad_label: pad value use for labels. By default, it's the neutral label.
        punct_label_ids: dict to map punctuation labels to label ids.
            Starts with pad_label->0 and then increases in alphabetical order.
            Required for training and evaluation, not needed for inference.
        capit_label_ids: dict to map labels to label ids. Starts
            with pad_label->0 and then increases in alphabetical order.
            Required for training and evaluation, not needed for inference.
        punct_labels_lines: list of labels for every word in a sequence (str)
        capit_labels_lines: list of labels for every word in a sequence (str)
        ignore_extra_tokens: whether to ignore extra tokens in the loss_mask
        ignore_start_end: whether to ignore bos and eos tokens in the loss_mask

    Returns:
        all_input_ids: input ids for all tokens
        all_segment_ids: token type ids
        all_input_mask: attention mask to use for BERT model
        all_subtokens_mask: masks out all subwords besides the first one
        all_loss_mask: loss mask to mask out tokens during training
        punct_all_labels: all labels for punctuation task (ints)
        capit_all_labels: all labels for capitalization task (ints)
        punct_label_ids: label (str) to id (int) map for punctuation task
        capit_label_ids: label (str) to id (int) map for capitalization task
    """
    all_subtokens = []
    all_loss_mask = []
    all_subtokens_mask = []
    all_segment_ids = []
    all_input_ids = []
    all_input_mask = []
    sent_lengths = []
    punct_all_labels = []
    capit_all_labels = []
    # Labels are only produced when BOTH punctuation and capitalization label lines are given.
    with_label = False

    if punct_labels_lines and capit_labels_lines:
        with_label = True

    # Pass 1: tokenize each query word-by-word, propagating each word's label to all of its subtokens.
    for i, query in enumerate(queries):
        words = query.strip().split()

        # add bos token
        subtokens = [tokenizer.cls_token]
        loss_mask = [1 - ignore_start_end]
        subtokens_mask = [0]
        if with_label:
            # NOTE(review): `pad_id` comes from punct_label_ids but is also used to pad the
            # capitalization labels below — safe only if both maps assign the same id to
            # pad_label (they do when built by create_label_ids, which puts pad_label at 0).
            pad_id = punct_label_ids[pad_label]
            punct_labels = [pad_id]
            punct_query_labels = [punct_label_ids[lab] for lab in punct_labels_lines[i]]

            capit_labels = [pad_id]
            capit_query_labels = [capit_label_ids[lab] for lab in capit_labels_lines[i]]

        for j, word in enumerate(words):
            word_tokens = tokenizer.text_to_tokens(word)
            subtokens.extend(word_tokens)

            # Only the first subtoken of a word contributes to the loss unless
            # ignore_extra_tokens is False (then all subtokens do).
            loss_mask.append(1)
            loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))

            subtokens_mask.append(1)
            subtokens_mask.extend([0] * (len(word_tokens) - 1))

            if with_label:
                punct_labels.extend([punct_query_labels[j]] * len(word_tokens))
                capit_labels.extend([capit_query_labels[j]] * len(word_tokens))

        # add eos token
        subtokens.append(tokenizer.sep_token)
        loss_mask.append(1 - ignore_start_end)
        subtokens_mask.append(0)
        sent_lengths.append(len(subtokens))
        all_subtokens.append(subtokens)
        all_loss_mask.append(loss_mask)
        all_subtokens_mask.append(subtokens_mask)
        all_input_mask.append([1] * len(subtokens))

        if with_label:
            punct_labels.append(pad_id)
            punct_all_labels.append(punct_labels)
            capit_labels.append(pad_id)
            capit_all_labels.append(capit_labels)

    # Shrink max_seq_length to the longest actual sentence if that is shorter.
    max_seq_length = min(max_seq_length, max(sent_lengths))
    logging.info(f'Max length: {max_seq_length}')
    get_stats(sent_lengths)
    too_long_count = 0

    # Pass 2: truncate over-long sequences and pad short ones to max_seq_length.
    for i, subtokens in enumerate(all_subtokens):
        if len(subtokens) > max_seq_length:
            # NOTE: truncation keeps the TAIL of the sequence (drops tokens from the left,
            # re-prepending [CLS]) — the end of the sentence is preserved, not the beginning.
            subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]
            all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]
            all_loss_mask[i] = [int(not ignore_start_end)] + all_loss_mask[i][-max_seq_length + 1 :]
            all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]

            if with_label:
                punct_all_labels[i] = [pad_id] + punct_all_labels[i][-max_seq_length + 1 :]
                capit_all_labels[i] = [pad_id] + capit_all_labels[i][-max_seq_length + 1 :]
            too_long_count += 1

        all_input_ids.append(tokenizer.tokens_to_ids(subtokens))

        if len(subtokens) < max_seq_length:
            extra = max_seq_length - len(subtokens)
            all_input_ids[i] = all_input_ids[i] + [0] * extra
            all_loss_mask[i] = all_loss_mask[i] + [0] * extra
            all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra
            all_input_mask[i] = all_input_mask[i] + [0] * extra

            if with_label:
                punct_all_labels[i] = punct_all_labels[i] + [pad_id] * extra
                capit_all_labels[i] = capit_all_labels[i] + [pad_id] * extra

        all_segment_ids.append([0] * max_seq_length)

    logging.info(f'{too_long_count} are longer than {max_seq_length}')

    # Log up to five examples for debugging.
    for i in range(min(len(all_input_ids), 5)):
        logging.info("*** Example ***")
        logging.info("i: %s" % (i))
        logging.info("subtokens: %s" % " ".join(list(map(str, all_subtokens[i]))))
        logging.info("loss_mask: %s" % " ".join(list(map(str, all_loss_mask[i]))))
        logging.info("input_mask: %s" % " ".join(list(map(str, all_input_mask[i]))))
        logging.info("subtokens_mask: %s" % " ".join(list(map(str, all_subtokens_mask[i]))))
        if with_label:
            logging.info("punct_labels: %s" % " ".join(list(map(str, punct_all_labels[i]))))
            logging.info("capit_labels: %s" % " ".join(list(map(str, capit_all_labels[i]))))

    return (
        all_input_ids,
        all_segment_ids,
        all_input_mask,
        all_subtokens_mask,
        all_loss_mask,
        punct_all_labels,
        capit_all_labels,
        punct_label_ids,
        capit_label_ids,
    )


class BertPunctuationCapitalizationDataset(Dataset):
    """
    Creates dataset to use during training for punctuation and capitalization tasks with a pretrained model.
    For dataset to use during inference without labels, see BertPunctuationCapitalizationInferDataset.

    Args:
        text_file: file to sequences, each line should be a sentence, no header.
        label_file: file to labels, each line corresponds to word labels for a sentence in the text_file. No header.
        max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
        tokenizer: such as AutoTokenizer
        num_samples: number of samples you want to use for the dataset.
            If -1, use all dataset. Useful for testing.
        pad_label: pad value use for labels.
            by default, it's the neutral label.
        punct_label_ids and capit_label_ids (dict):
            dict to map labels to label ids.
209 Starts with pad_label->0 and then increases in alphabetical order 210 For dev set use label_ids generated during training to support 211 cases when not all labels are present in the dev set. 212 For training set label_ids should be None or loaded from cache 213 ignore_extra_tokens: whether to ignore extra tokens in the loss_mask 214 ignore_start_end: whether to ignore bos and eos tokens in the loss_mask 215 use_cache: whether to use processed data cache or not 216 get_label_frequencies: whether to generate label frequencies 217 punct_label_ids_file and capit_label_ids_file: name of the files to save in .nemo 218 """ 219 220 @property 221 def output_types(self) -> Optional[Dict[str, NeuralType]]: 222 """Returns definitions of module output ports. """ 223 return { 224 'input_ids': NeuralType(('B', 'T'), ChannelType()), 225 'segment_ids': NeuralType(('B', 'T'), ChannelType()), 226 'input_mask': NeuralType(('B', 'T'), MaskType()), 227 'subtokens_mask': NeuralType(('B', 'T'), MaskType()), 228 'loss_mask': NeuralType(('B', 'T'), MaskType()), 229 'punct_labels': NeuralType(('B', 'T'), LabelsType()), 230 'capit_labels': NeuralType(('B', 'T'), LabelsType()), 231 } 232 233 def __init__( 234 self, 235 text_file: str, 236 label_file: str, 237 max_seq_length: int, 238 tokenizer: TokenizerSpec, 239 num_samples: int = -1, 240 pad_label: str = 'O', 241 punct_label_ids: Dict[str, int] = None, 242 capit_label_ids: Dict[str, int] = None, 243 ignore_extra_tokens: bool = False, 244 ignore_start_end: bool = False, 245 use_cache: bool = True, 246 get_label_frequencies: bool = False, 247 punct_label_ids_file: str = 'punct_label_ids.csv', 248 capit_label_ids_file: str = 'capit_label_ids.csv', 249 ): 250 """ Initializes BertPunctuationCapitalizationDataset. """ 251 252 if not (os.path.exists(text_file) and os.path.exists(label_file)): 253 raise FileNotFoundError( 254 f'{text_file} or {label_file} not found. The data should be splitted into 2 files: text.txt and \ 255 labels.txt. 
Each line of the text.txt file contains text sequences, where words are separated with \ 256 spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are \ 257 separated with spaces. Each line of the files should follow the format: \ 258 [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \ 259 [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).' 260 ) 261 262 # Cache features 263 data_dir = os.path.dirname(text_file) 264 filename = os.path.basename(text_file) 265 266 if not filename.endswith('.txt'): 267 raise ValueError("{text_file} should have extension .txt") 268 269 filename = filename[:-4] 270 vocab_size = getattr(tokenizer, "vocab_size", 0) 271 features_pkl = os.path.join( 272 data_dir, 273 "cached_{}_{}_{}_{}_{}".format( 274 filename, tokenizer.name, str(max_seq_length), str(vocab_size), str(num_samples) 275 ), 276 ) 277 278 self.punct_label_ids_file = os.path.join(data_dir, punct_label_ids_file) 279 self.capit_label_ids_file = os.path.join(data_dir, capit_label_ids_file) 280 281 master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0 282 cache_files_exist = ( 283 os.path.exists(features_pkl) 284 and os.path.exists(self.punct_label_ids_file) 285 and os.path.exists(self.capit_label_ids_file) 286 ) 287 features = None 288 if master_device and not (cache_files_exist and use_cache): 289 if num_samples == 0: 290 raise ValueError("num_samples has to be positive", num_samples) 291 logging.info(f'Processing {text_file}') 292 with open(text_file, 'r') as f: 293 text_lines = f.readlines() 294 295 # Collect all possible labels 296 punct_unique_labels = set() 297 capit_unique_labels = set() 298 punct_labels_lines = [] 299 capit_labels_lines = [] 300 with open(label_file, 'r') as f: 301 for line in f: 302 line = line.strip().split() 303 304 # extract punctuation and capitalization labels 305 punct_line, capit_line = zip(*line) 306 punct_labels_lines.append(punct_line) 307 
capit_labels_lines.append(capit_line) 308 309 punct_unique_labels.update(punct_line) 310 capit_unique_labels.update(capit_line) 311 312 if len(punct_labels_lines) != len(text_lines): 313 raise ValueError("Labels file should contain labels for every word") 314 315 dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines)) 316 317 if num_samples > 0: 318 dataset = dataset[:num_samples] 319 320 dataset = list(zip(*dataset)) 321 text_lines = dataset[0] 322 punct_labels_lines = dataset[1] 323 capit_labels_lines = dataset[2] 324 325 # for dev/test sets use label mapping from training set 326 if punct_label_ids: 327 if len(punct_label_ids) != len(punct_unique_labels): 328 logging.info( 329 'Not all labels from the specified' 330 + 'label_ids dictionary are present in the' 331 + 'current dataset. Using the provided' 332 + 'label_ids dictionary.' 333 ) 334 else: 335 logging.info('Using the provided label_ids dictionary.') 336 else: 337 logging.info( 338 'Creating a new label to label_id dictionary.' 339 + ' It\'s recommended to use label_ids generated' 340 + ' during training for dev/test sets to avoid' 341 + ' errors if some labels are not' 342 + ' present in the dev/test sets.' 343 + ' For training set label_ids should be None.' 
344 ) 345 346 def create_label_ids(unique_labels, pad_label=pad_label): 347 label_ids = {pad_label: 0} 348 if pad_label in unique_labels: 349 unique_labels.remove(pad_label) 350 for label in sorted(unique_labels): 351 label_ids[label] = len(label_ids) 352 return label_ids 353 354 punct_label_ids = create_label_ids(punct_unique_labels) 355 capit_label_ids = create_label_ids(capit_unique_labels) 356 357 self._save_label_ids(punct_label_ids, self.punct_label_ids_file) 358 self._save_label_ids(capit_label_ids, self.capit_label_ids_file) 359 360 features = get_features( 361 text_lines, 362 max_seq_length, 363 tokenizer, 364 pad_label=pad_label, 365 punct_labels_lines=punct_labels_lines, 366 capit_labels_lines=capit_labels_lines, 367 punct_label_ids=punct_label_ids, 368 capit_label_ids=capit_label_ids, 369 ignore_extra_tokens=ignore_extra_tokens, 370 ignore_start_end=ignore_start_end, 371 ) 372 373 pickle.dump(features, open(features_pkl, "wb")) 374 logging.info(f'Features saved to {features_pkl}') 375 376 # wait until the master process writes to the processed data files 377 if torch.distributed.is_initialized(): 378 torch.distributed.barrier() 379 380 if features is None: 381 features = pickle.load(open(features_pkl, 'rb')) 382 logging.info(f'Features restored from {features_pkl}') 383 384 self.all_input_ids = features[0] 385 self.all_segment_ids = features[1] 386 self.all_input_mask = features[2] 387 self.all_subtokens_mask = features[3] 388 self.all_loss_mask = features[4] 389 self.punct_all_labels = features[5] 390 self.capit_all_labels = features[6] 391 self.punct_label_ids = features[7] 392 self.capit_label_ids = features[8] 393 394 if get_label_frequencies: 395 self.punct_label_frequencies = self._calculate_label_frequencies(self.punct_all_labels, data_dir, 'punct') 396 self.capit_label_frequencies = self._calculate_label_frequencies(self.capit_all_labels, data_dir, 'capit') 397 398 def _calculate_label_frequencies(self, all_labels: List[int], data_dir: str, 
name: str) -> Dict[str, float]: 399 """ Calculates labels frequencies """ 400 merged_labels = itertools.chain.from_iterable(all_labels) 401 logging.info('Three most popular labels') 402 _, label_frequencies, _ = get_label_stats(merged_labels, data_dir + '/label_count_' + name + '.tsv') 403 return label_frequencies 404 405 def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None: 406 """ Saves label ids map to a file """ 407 with open(filename, 'w') as out: 408 labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1])) 409 out.write('\n'.join(labels)) 410 logging.info(f'Labels: {label_ids}') 411 logging.info(f'Labels mapping saved to : {out.name}') 412 413 def __len__(self): 414 return len(self.all_input_ids) 415 416 def __getitem__(self, idx): 417 return ( 418 np.array(self.all_input_ids[idx]), 419 np.array(self.all_segment_ids[idx]), 420 np.array(self.all_input_mask[idx], dtype=np.long), 421 np.array(self.all_subtokens_mask[idx]), 422 np.array(self.all_loss_mask[idx]), 423 np.array(self.punct_all_labels[idx]), 424 np.array(self.capit_all_labels[idx]), 425 ) 426 427 428 def _get_subtokens_and_subtokens_mask(query: str, tokenizer: TokenizerSpec) -> Tuple[List[str], List[int]]: 429 """ 430 Tokenizes input query into subtokens and creates subtokens mask. Subtokens mask is an array of the same length as 431 subtokens array and contains zeros and ones in which. 
If element of mask equals 1, then corresponding subtoken in 432 subtokens array is first subtoken in some word 433 Args: 434 query: a string that will be tokenized 435 tokenizer: an instance of tokenizer 436 Returns: 437 subtokens: list of subtokens 438 subtokens_mask: list of ints 439 """ 440 words = query.strip().split() 441 subtokens = [] 442 subtokens_mask = [] 443 for j, word in enumerate(words): 444 word_tokens = tokenizer.text_to_tokens(word) 445 subtokens.extend(word_tokens) 446 subtokens_mask.append(1) 447 subtokens_mask.extend([0] * (len(word_tokens) - 1)) 448 return subtokens, subtokens_mask 449 450 451 def _check_max_seq_length_and_margin_and_step(max_seq_length: int, margin: int, step: int): 452 """ 453 Checks values of ``max_seq_length``, ``margin``, and ``step``. 454 Args: 455 max_seq_length: a segment length with ``[CLS]`` and ``[SEP]`` tokens 456 margin: a number of input tokens near edges of segments which are not used in punctuation and capitalization 457 prediction. 458 step: offset of consequent segments. 459 Returns: 460 None 461 """ 462 if max_seq_length < 3: 463 raise ValueError( 464 f"Parameter `max_seq_length={max_seq_length}` cannot be less than 3 because `max_seq_length` is a length " 465 f"of a segment with [CLS] and [SEP] tokens." 466 ) 467 if margin >= (max_seq_length - 2) // 2 and margin > 0 or margin < 0: 468 raise ValueError( 469 f"Parameter `margin` has to be not negative and less than `(max_seq_length - 2) // 2`. Don't forget about " 470 f"CLS and EOS tokens in the beginning and the end of segment. margin={margin}, " 471 f"max_seq_length={max_seq_length}" 472 ) 473 if step <= 0: 474 raise ValueError(f"Parameter `step` has to be positive whereas step={step}") 475 if step > max_seq_length - 2 - 2 * margin: 476 logging.warning( 477 f"Parameter step={step} is too big. It will be reduced to `min(max_seq_length, <maximum query length> + 2) " 478 f"- 2 - 2 * margin`." 
479 ) 480 481 482 def get_features_infer( 483 queries: List[str], 484 tokenizer: TokenizerSpec, 485 max_seq_length: int = 64, 486 step: Optional[int] = 8, 487 margin: Optional[int] = 16, 488 ) -> Tuple[ 489 List[List[int]], List[List[int]], List[List[int]], List[List[int]], List[int], List[int], List[bool], List[bool], 490 ]: 491 """ 492 Processes the data and returns features. 493 494 Args: 495 queries: text sequences 496 tokenizer: such as AutoTokenizer 497 max_seq_length: max sequence length minus 2 for [CLS] and [SEP] 498 step: relative shift of consequent segments into which long queries are split. Long queries are split into 499 segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are 500 tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is 501 tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. 502 margin: number of subtokens near edges of segments which are not used for punctuation and capitalization 503 prediction. The first segment does not have left margin and the last segment does not have right 504 margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, 505 ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments 506 ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], 507 ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions 508 computation, margins are removed. In the next list, subtokens which logits are not used for final 509 predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], 510 ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. 
511 512 Returns: 513 all_input_ids: list of input ids of all segments 514 all_segment_ids: token type ids of all segments 515 all_input_mask: attention mask to use for BERT model 516 all_subtokens_mask: masks out all subwords besides the first one 517 all_quantities_of_preceding_words: number of words in query preceding a segment. Used for joining 518 predictions from overlapping segments. 519 all_query_ids: index of a query to which segment belongs 520 all_is_first: is segment first segment in a query 521 all_is_last: is segment last segment in a query 522 """ 523 st = [] 524 stm = [] 525 sent_lengths = [] 526 for i, query in enumerate(queries): 527 subtokens, subtokens_mask = _get_subtokens_and_subtokens_mask(query, tokenizer) 528 sent_lengths.append(len(subtokens)) 529 st.append(subtokens) 530 stm.append(subtokens_mask) 531 _check_max_seq_length_and_margin_and_step(max_seq_length, margin, step) 532 if max_seq_length > max(sent_lengths) + 2: 533 max_seq_length = max(sent_lengths) + 2 534 # If `max_seq_length` is greater than maximum length of input query, parameters ``margin`` and ``step`` are 535 # not used will not be used. 536 step = 1 537 # Maximum number of word subtokens in segment. The first and the last tokens in segment are CLS and EOS 538 length = max_seq_length - 2 539 else: 540 # Maximum number of word subtokens in segment. 
The first and the last tokens in segment are CLS and EOS 541 length = max_seq_length - 2 542 step = min(length - margin * 2, step) 543 logging.info(f'Max length: {max_seq_length}') 544 get_stats(sent_lengths) 545 all_input_ids, all_segment_ids, all_subtokens_mask, all_input_mask, all_input_mask = [], [], [], [], [] 546 all_quantities_of_preceding_words, all_query_ids, all_is_first, all_is_last = [], [], [], [] 547 for q_i, query_st in enumerate(st): 548 q_inp_ids, q_segment_ids, q_subtokens_mask, q_inp_mask, q_quantities_of_preceding_words = [], [], [], [], [] 549 for i in range(0, max(len(query_st), length) - length + step, step): 550 subtokens = [tokenizer.cls_token] + query_st[i : i + length] + [tokenizer.sep_token] 551 q_inp_ids.append(tokenizer.tokens_to_ids(subtokens)) 552 q_segment_ids.append([0] * len(subtokens)) 553 q_subtokens_mask.append([0] + stm[q_i][i : i + length] + [0]) 554 q_inp_mask.append([1] * len(subtokens)) 555 q_quantities_of_preceding_words.append(np.count_nonzero(stm[q_i][:i])) 556 all_input_ids.append(q_inp_ids) 557 all_segment_ids.append(q_segment_ids) 558 all_subtokens_mask.append(q_subtokens_mask) 559 all_input_mask.append(q_inp_mask) 560 all_quantities_of_preceding_words.append(q_quantities_of_preceding_words) 561 all_query_ids.append([q_i] * len(q_inp_ids)) 562 all_is_first.append([True] + [False] * (len(q_inp_ids) - 1)) 563 all_is_last.append([False] * (len(q_inp_ids) - 1) + [True]) 564 return ( 565 list(itertools.chain(*all_input_ids)), 566 list(itertools.chain(*all_segment_ids)), 567 list(itertools.chain(*all_input_mask)), 568 list(itertools.chain(*all_subtokens_mask)), 569 list(itertools.chain(*all_quantities_of_preceding_words)), 570 list(itertools.chain(*all_query_ids)), 571 list(itertools.chain(*all_is_first)), 572 list(itertools.chain(*all_is_last)), 573 ) 574 575 576 class BertPunctuationCapitalizationInferDataset(Dataset): 577 """ 578 Creates dataset to use during inference for punctuation and capitalization tasks with a 
pretrained model. 579 For dataset to use during training with labels, see BertPunctuationCapitalizationDataset. 580 581 Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments 582 which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization 583 including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift 584 between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near 585 borders of segments which have only one side context. 586 587 Args: 588 queries: list of sequences. 589 tokenizer: such as AutoTokenizer 590 max_seq_length: max sequence length minus 2 for [CLS] and [SEP] 591 step: relative shift of consequent segments into which long queries are split. Long queries are split into 592 segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are 593 tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is 594 tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. 595 margin: number of subtokens in the beginning and the end of segments which are not used for prediction 596 computation. The first segment does not have left margin and the last segment does not have right 597 margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, 598 ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments 599 ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], 600 ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions 601 computation, margins are removed. 
In the next list, subtokens which logits are not used for final 602 predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], 603 ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. 604 """ 605 606 @property 607 def output_types(self) -> Optional[Dict[str, NeuralType]]: 608 """Returns definitions of module output ports. 609 610 input_ids: ids of word subtokens encoded using tokenizer 611 segment_ids: an array of zeros 612 input_mask: attention mask. Zeros if input is padding. 613 subtoken_mask: a mask used for retrieving predictions for words. An element equals ``1`` if corresponding 614 token is the first token in some word and zero otherwise. For example, if input query 615 "language processing" is tokenized into ["[CLS]", "language", "process", "ing", "SEP"], then 616 ``subtokens_mask`` will be [0, 1, 1, 0, 0]. 617 quantities_of_preceding_words: number of words preceding a segment in a query. It is used for uniting 618 predictions from different segments if such segments overlap. For example, if query "hello john" is 619 tokenized into segments ``[['hell', 'o'], ['john']]``, then ``quantities_of_preceding_words=[0, 1]``. 620 query_ids: ids of queries to which segments belong. For example, if ``queries=["foo", "bar"]`` are 621 segmented into ``[[['[CLS]', 'f', 'o', '[SEP]'], ['[CLS]', 'o', 'o', '[SEP]']], 622 [['[CLS]', 'b', 'a', '[SEP]'], ['[CLS]', 'a', 'r', '[SEP]']]]``, then for batch 623 [['[CLS]', 'o', 'o', '[SEP]'], ['[CLS]', 'b', 'a', '[SEP]'], ['[CLS]', 'a', 'r', '[SEP]']] 624 ``query_ids=[0, 1, 1]``. 625 is_first: is segment the first segment in query. The left margin of the first segment in a query is not 626 removed and this parameter is used to identify first segments. 627 is_last: is segment the last segment in query. The right margin of the last segment in a query is not 628 removed and this parameter is used to identify last segments. 
629 630 """ 631 return { 632 'input_ids': NeuralType(('B', 'T'), ChannelType()), 633 'segment_ids': NeuralType(('B', 'T'), ChannelType()), 634 'input_mask': NeuralType(('B', 'T'), MaskType()), 635 'subtokens_mask': NeuralType(('B', 'T'), MaskType()), 636 'quantities_of_preceding_words': NeuralType(('B',), Index()), 637 'query_ids': NeuralType(('B',), Index()), 638 'is_first': NeuralType(('B',), BoolType()), 639 'is_last': NeuralType(('B',), BoolType()), 640 } 641 642 def __init__( 643 self, queries: List[str], tokenizer: TokenizerSpec, max_seq_length: int = 128, step: int = 32, margin: int = 16 644 ): 645 features = get_features_infer( 646 queries=queries, max_seq_length=max_seq_length, tokenizer=tokenizer, step=step, margin=margin 647 ) 648 self.all_input_ids: List[List[int]] = features[0] 649 self.all_segment_ids: List[List[int]] = features[1] 650 self.all_input_mask: List[List[int]] = features[2] 651 self.all_subtokens_mask: List[List[int]] = features[3] 652 self.all_quantities_of_preceding_words: List[int] = features[4] 653 self.all_query_ids: List[int] = features[5] 654 self.all_is_first: List[bool] = features[6] 655 self.all_is_last: List[bool] = features[7] 656 657 def __len__(self) -> int: 658 return len(self.all_input_ids) 659 660 def collate_fn( 661 self, batch: List[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]] 662 ) -> Tuple[ 663 torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[int], Tuple[int], Tuple[bool], Tuple[bool] 664 ]: 665 inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch) 666 return ( 667 pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0), 668 pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0), 669 pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0), 670 pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0), 671 n_preceding, 
672 query_ids, 673 is_first, 674 is_last, 675 ) 676 677 def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]: 678 return ( 679 np.array(self.all_input_ids[idx]), 680 np.array(self.all_segment_ids[idx]), 681 np.array(self.all_input_mask[idx], dtype=np.float32), 682 np.array(self.all_subtokens_mask[idx]), 683 self.all_quantities_of_preceding_words[idx], 684 self.all_query_ids[idx], 685 self.all_is_first[idx], 686 self.all_is_last[idx], 687 ) 688 [end of nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py] [start of nemo/collections/nlp/models/nlp_model.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import hashlib 16 import json 17 import os 18 from typing import Any, Optional 19 20 from omegaconf import DictConfig, OmegaConf 21 from pytorch_lightning import Trainer 22 from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml 23 from pytorch_lightning.utilities import rank_zero_only 24 from pytorch_lightning.utilities.cloud_io import load as pl_load 25 from pytorch_lightning.utilities.migration import pl_legacy_patch 26 from transformers import TRANSFORMERS_CACHE 27 28 from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer 29 from nemo.collections.nlp.modules import BertModule 30 from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import VOCAB_FILE_NAME 31 from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer 32 from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector 33 from nemo.core.classes import ModelPT 34 from nemo.core.classes.exportable import Exportable 35 from nemo.utils import AppState, logging 36 37 try: 38 import apex 39 40 HAVE_APEX = True 41 42 except (ImportError, ModuleNotFoundError): 43 HAVE_APEX = False 44 45 46 __all__ = ['NLPModel'] 47 48 NEMO_NLP_TMP = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), "nemo_nlp_tmp") 49 50 os.makedirs(NEMO_NLP_TMP, exist_ok=True) 51 52 53 class NLPModel(ModelPT, Exportable): 54 """Base class for NLP Models. 55 """ 56 57 def __init__(self, cfg: DictConfig, trainer: Trainer = None): 58 super().__init__(cfg, trainer) 59 # handles model parallel save and restore logic 60 self._save_restore_connector = NLPSaveRestoreConnector() 61 self.set_world_size(trainer) 62 if not HAVE_APEX: 63 logging.warning("Apex was not found. Using model parallel or megatron models will error out.") 64 65 def register_artifact( 66 self, config_path: str, src: str, verify_src_exists: bool = False, 67 ): 68 """ Overrides ModelPT register_artifact default behavior. 
NLP models usually need artifacts that are optional.""" 69 return super().register_artifact(config_path, src, verify_src_exists=verify_src_exists) 70 71 @rank_zero_only 72 def register_bert_model(self): 73 """Adds encoder config to .nemo archive for Jarvis. 74 """ 75 # check if there is an encoder, warn if not 76 if self.bert_model is None: 77 raise ValueError('Instantiate self.bert_model before registering it.') 78 else: 79 # get encoder config and create source for artifact 80 81 if isinstance(self.bert_model, BertModule): 82 # HuggingFace Transformer Config 83 pretrained_model_name = self.bert_model.name_or_path 84 # Some HF names have "/" in them so we replace with _ 85 pretrained_model_name = pretrained_model_name.replace("/", "_") 86 encoder_config_path = pretrained_model_name + '_encoder_config' 87 encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path + '.json') 88 self.bert_model.config.to_json_file(encoder_config_src) # name requested by jarvis team 89 self.register_artifact('language_model.config_file', encoder_config_src) # for .nemo 90 else: 91 logging.info( 92 f'Registering BERT model config for {self.bert_model} is not yet supported. Please override this method if needed.' 93 ) 94 95 def setup_tokenizer(self, cfg: DictConfig): 96 """Instantiates tokenizer based on config and registers tokenizer artifacts. 97 98 If model is being restored from .nemo file then the tokenizer.vocab_file will 99 be used (if it exists). 100 101 Otherwise, we will use the vocab file provided in the config (if it exists). 102 103 Finally, if no vocab file is given (this happens frequently when using HF), 104 we will attempt to extract the vocab from the tokenizer object and then register it. 
105 106 Args: 107 cfg (DictConfig): Tokenizer config 108 """ 109 vocab_file = None 110 if cfg.vocab_file: 111 vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src=cfg.vocab_file) 112 self.tokenizer = get_tokenizer( 113 tokenizer_name=cfg.tokenizer_name, 114 vocab_file=vocab_file, 115 special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None, 116 tokenizer_model=self.register_artifact(config_path='tokenizer.tokenizer_model', src=cfg.tokenizer_model), 117 ) 118 119 if vocab_file is None: 120 # when there is no vocab file we try to get the vocab from the tokenizer and register it 121 self._register_vocab_from_tokenizer(vocab_file_config_path='tokenizer.vocab_file', cfg=cfg) 122 123 @rank_zero_only 124 def _register_vocab_from_tokenizer( 125 self, 126 vocab_file_config_path: str = 'tokenizer.vocab_file', 127 vocab_dict_config_path: str = 'tokenizer_vocab_dict', 128 cfg: DictConfig = None, 129 ): 130 """Creates vocab file from tokenizer if vocab file is None. 
131 132 Args: 133 vocab_file_config_path: path to the vocab_file in the config 134 vocab_dict_config_path: path to the vocab_dict in the config 135 cfg: tokenizer config 136 """ 137 if self.tokenizer is None: 138 raise ValueError('Instantiate self.tokenizer before registering vocab from it.') 139 else: 140 if isinstance(self.tokenizer, AutoTokenizer): 141 # extract vocab from tokenizer 142 vocab_dict = self.tokenizer.tokenizer.get_vocab() 143 144 # for fast and slow tokenizer vocabularies compatibility 145 vocab_dict = dict(sorted(vocab_dict.items(), key=lambda item: item[1])) 146 147 # get hash of vocab_dict to create a unique directory to write vocab_dict and vocab_file 148 m = hashlib.md5() 149 if 'tokenizer_name' in cfg: 150 if cfg.tokenizer_name is not None: 151 # different pretrained models with the same vocab will have different hash 152 m.update(cfg.tokenizer_name.encode()) 153 # get string representation of vocab_dict 154 vocab_dict_str = json.dumps(vocab_dict, sort_keys=True).encode() 155 m.update(vocab_dict_str) 156 vocab_dict_hash = m.hexdigest() 157 158 hash_path = os.path.join(NEMO_NLP_TMP, vocab_dict_hash) 159 os.makedirs(hash_path, exist_ok=True) 160 161 vocab_json_src = os.path.join(hash_path, vocab_dict_config_path) 162 163 with open(vocab_json_src, 'w', encoding='utf-8') as f: 164 f.write(json.dumps(vocab_dict, indent=2, sort_keys=True) + '\n') 165 self.register_artifact(config_path=vocab_dict_config_path, src=vocab_json_src) 166 167 tokenizer_name = self.tokenizer.tokenizer.__class__.__name__ 168 # save vocab file 169 # depending on the HuggingFace model, vocab file could mean different things, see VOCAB_FILE_NAME 170 self.tokenizer.save_vocabulary(hash_path) 171 172 # create vocab file 173 vocab_file_src = os.path.join(hash_path, VOCAB_FILE_NAME[tokenizer_name]) 174 cfg.vocab_file = vocab_file_src 175 self.register_artifact(config_path=vocab_file_config_path, src=vocab_file_src) 176 else: 177 logging.info( 178 f'Registering tokenizer vocab for 
{self.tokenizer} is not yet supported. Please override this method if needed.'
                )

    @staticmethod
    def _unpack_nemo_file(path2file: str, out_folder: str) -> str:
        # Delegate to ModelPT's implementation; the explicit two-argument super() is required
        # because this is a staticmethod (no implicit class/instance binding).
        return super(NLPModel, NLPModel)._unpack_nemo_file(path2file, out_folder)

    @staticmethod
    def _make_nemo_file_from_folder(filename, source_dir):
        # Same delegation pattern as _unpack_nemo_file (staticmethod, explicit super args).
        return super(NLPModel, NLPModel)._make_nemo_file_from_folder(filename, source_dir)

    @property
    def input_module(self):
        # Encoder side of the model, used by the Exportable machinery.
        return self.bert_model

    @property
    def output_module(self):
        # Classification head side of the model, used by the Exportable machinery.
        return self.classifier

    @property
    def is_model_parallel_initialized(self):
        # True when a model-parallel process group has been set up in the global AppState.
        app_state = AppState()
        if app_state.model_parallel_group is not None:
            return True
        else:
            return False

    @classmethod
    def load_from_checkpoint(
        cls,
        checkpoint_path: str,
        map_location: Any = None,
        hparams_file: Optional[str] = None,
        strict: bool = True,
        **kwargs,
    ):
        """
        Loads ModelPT from checkpoint, with some maintenance of restoration.
        For documentation, please refer to LightningModule.load_from_checkpoint() documentation.
        """
        checkpoint = None
        try:
            # Mark restoration as in progress so artifact handling behaves correctly while loading.
            cls._set_model_restore_state(is_being_restored=True)
            # TODO: replace with proper PTL API
            with pl_legacy_patch():
                if map_location is not None:
                    checkpoint = pl_load(checkpoint_path, map_location=map_location)
                else:
                    # Default: load tensors onto CPU (storage stays where it was serialized from).
                    checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)

            if hparams_file is not None:
                extension = hparams_file.split(".")[-1]
                if extension.lower() == "csv":
                    hparams = load_hparams_from_tags_csv(hparams_file)
                elif extension.lower() in ("yml", "yaml"):
                    hparams = load_hparams_from_yaml(hparams_file)
                else:
                    raise ValueError(".csv, .yml or .yaml is required for `hparams_file`")

                hparams["on_gpu"] = False

                # overwrite hparams by the given file
                checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = hparams

            # for past checkpoint need to add the new key
            if cls.CHECKPOINT_HYPER_PARAMS_KEY not in checkpoint:
                checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = {}
            # override the hparams with values that were passed in
            # TODO: can we do this without overriding?
            config_kwargs = kwargs.copy()
            if 'trainer' in config_kwargs:
                config_kwargs.pop('trainer')
            checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].update(config_kwargs)

            model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
            # NOTE(review): the `checkpoint` name is rebound to the loaded model here and
            # returned below — despite the name, this method returns the model instance.
            checkpoint = model

        finally:
            # Always clear the restore flag, even if loading raised.
            cls._set_model_restore_state(is_being_restored=False)
        return checkpoint
[end of nemo/collections/nlp/models/nlp_model.py]
[start of nemo/collections/nlp/models/token_classification/__init__.py]
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 from nemo.collections.nlp.models.token_classification.punctuation_capitalization_model import ( 17 PunctuationCapitalizationModel, 18 ) 19 from nemo.collections.nlp.models.token_classification.token_classification_model import TokenClassificationModel 20 [end of nemo/collections/nlp/models/token_classification/__init__.py] [start of nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import os 16 from math import ceil 17 from typing import Dict, List, Optional, Tuple 18 19 import numpy as np 20 import torch 21 from omegaconf import DictConfig, OmegaConf 22 from pytorch_lightning import Trainer 23 from tqdm import tqdm 24 25 from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss 26 from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import ( 27 BertPunctuationCapitalizationDataset, 28 BertPunctuationCapitalizationInferDataset, 29 ) 30 from nemo.collections.nlp.metrics.classification_report import ClassificationReport 31 from nemo.collections.nlp.models.nlp_model import NLPModel 32 from nemo.collections.nlp.modules.common import TokenClassifier 33 from nemo.collections.nlp.modules.common.lm_utils import get_lm_model 34 from nemo.core.classes.common import PretrainedModelInfo, typecheck 35 from nemo.core.classes.exportable import Exportable 36 from nemo.core.neural_types import LogitsType, NeuralType 37 from nemo.utils import logging 38 39 __all__ = ['PunctuationCapitalizationModel'] 40 41 42 class PunctuationCapitalizationModel(NLPModel, Exportable): 43 @property 44 def input_types(self) -> Optional[Dict[str, NeuralType]]: 45 return self.bert_model.input_types 46 47 @property 48 def output_types(self) -> Optional[Dict[str, NeuralType]]: 49 return { 50 "punct_logits": NeuralType(('B', 'T', 'C'), LogitsType()), 51 "capit_logits": NeuralType(('B', 'T', 'C'), LogitsType()), 52 } 53 54 def __init__(self, cfg: DictConfig, trainer: Trainer = None): 55 """ 56 Initializes BERT Punctuation and Capitalization model. 
57 """ 58 self.setup_tokenizer(cfg.tokenizer) 59 60 super().__init__(cfg=cfg, trainer=trainer) 61 62 self.bert_model = get_lm_model( 63 pretrained_model_name=cfg.language_model.pretrained_model_name, 64 config_file=self.register_artifact('language_model.config_file', cfg.language_model.config_file), 65 config_dict=OmegaConf.to_container(cfg.language_model.config) if cfg.language_model.config else None, 66 checkpoint_file=cfg.language_model.lm_checkpoint, 67 vocab_file=self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file), 68 ) 69 70 self.punct_classifier = TokenClassifier( 71 hidden_size=self.bert_model.config.hidden_size, 72 num_classes=len(self._cfg.punct_label_ids), 73 activation=cfg.punct_head.activation, 74 log_softmax=False, 75 dropout=cfg.punct_head.fc_dropout, 76 num_layers=cfg.punct_head.punct_num_fc_layers, 77 use_transformer_init=cfg.punct_head.use_transformer_init, 78 ) 79 80 self.capit_classifier = TokenClassifier( 81 hidden_size=self.bert_model.config.hidden_size, 82 num_classes=len(self._cfg.capit_label_ids), 83 activation=cfg.capit_head.activation, 84 log_softmax=False, 85 dropout=cfg.capit_head.fc_dropout, 86 num_layers=cfg.capit_head.capit_num_fc_layers, 87 use_transformer_init=cfg.capit_head.use_transformer_init, 88 ) 89 90 self.loss = CrossEntropyLoss(logits_ndim=3) 91 self.agg_loss = AggregatorLoss(num_inputs=2) 92 93 # setup to track metrics 94 self.punct_class_report = ClassificationReport( 95 num_classes=len(self._cfg.punct_label_ids), 96 label_ids=self._cfg.punct_label_ids, 97 mode='macro', 98 dist_sync_on_step=True, 99 ) 100 self.capit_class_report = ClassificationReport( 101 num_classes=len(self._cfg.capit_label_ids), 102 label_ids=self._cfg.capit_label_ids, 103 mode='macro', 104 dist_sync_on_step=True, 105 ) 106 107 @typecheck() 108 def forward(self, input_ids, attention_mask, token_type_ids=None): 109 """ 110 No special modification required for Lightning, define it as you normally would 111 in the `nn.Module` in 
vanilla PyTorch.
        """
        hidden_states = self.bert_model(
            input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
        )
        # One shared encoder feeds two independent token-classification heads.
        punct_logits = self.punct_classifier(hidden_states=hidden_states)
        capit_logits = self.capit_classifier(hidden_states=hidden_states)
        return punct_logits, capit_logits

    def _make_step(self, batch):
        # Shared forward + loss computation used by training, validation, and test steps.
        input_ids, input_type_ids, input_mask, subtokens_mask, loss_mask, punct_labels, capit_labels = batch
        punct_logits, capit_logits = self(
            input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask
        )

        # Two cross-entropy losses (punctuation and capitalization) aggregated into a single
        # scalar via self.agg_loss.
        punct_loss = self.loss(logits=punct_logits, labels=punct_labels, loss_mask=loss_mask)
        capit_loss = self.loss(logits=capit_logits, labels=capit_labels, loss_mask=loss_mask)
        loss = self.agg_loss(loss_1=punct_loss, loss_2=capit_loss)
        return loss, punct_logits, capit_logits

    def training_step(self, batch, batch_idx):
        """
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        loss, _, _ = self._make_step(batch)
        # Log the learning rate of the first parameter group alongside the training loss.
        lr = self._optimizer.param_groups[0]['lr']

        self.log('lr', lr, prog_bar=True)
        self.log('train_loss', loss)

        return {'loss': loss, 'lr': lr}

    def validation_step(self, batch, batch_idx, dataloader_idx=0):
        """
        Lightning calls this inside the validation loop with the data from the validation dataloader
148 """ 149 _, _, _, subtokens_mask, _, punct_labels, capit_labels = batch 150 val_loss, punct_logits, capit_logits = self._make_step(batch) 151 152 subtokens_mask = subtokens_mask > 0.5 153 punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask] 154 punct_labels = punct_labels[subtokens_mask] 155 self.punct_class_report.update(punct_preds, punct_labels) 156 157 capit_preds = torch.argmax(capit_logits, axis=-1)[subtokens_mask] 158 capit_labels = capit_labels[subtokens_mask] 159 self.capit_class_report.update(capit_preds, capit_labels) 160 161 return { 162 'val_loss': val_loss, 163 'punct_tp': self.punct_class_report.tp, 164 'punct_fn': self.punct_class_report.fn, 165 'punct_fp': self.punct_class_report.fp, 166 'capit_tp': self.capit_class_report.tp, 167 'capit_fn': self.capit_class_report.fn, 168 'capit_fp': self.capit_class_report.fp, 169 } 170 171 def test_step(self, batch, batch_idx, dataloader_idx=0): 172 """ 173 Lightning calls this inside the validation loop with the data from the validation dataloader 174 passed in as `batch`. 
175 """ 176 _, _, _, subtokens_mask, _, punct_labels, capit_labels = batch 177 test_loss, punct_logits, capit_logits = self._make_step(batch) 178 179 subtokens_mask = subtokens_mask > 0.5 180 punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask] 181 punct_labels = punct_labels[subtokens_mask] 182 self.punct_class_report.update(punct_preds, punct_labels) 183 184 capit_preds = torch.argmax(capit_logits, axis=-1)[subtokens_mask] 185 capit_labels = capit_labels[subtokens_mask] 186 self.capit_class_report.update(capit_preds, capit_labels) 187 188 return { 189 'test_loss': test_loss, 190 'punct_tp': self.punct_class_report.tp, 191 'punct_fn': self.punct_class_report.fn, 192 'punct_fp': self.punct_class_report.fp, 193 'capit_tp': self.capit_class_report.tp, 194 'capit_fn': self.capit_class_report.fn, 195 'capit_fp': self.capit_class_report.fp, 196 } 197 198 def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0): 199 """ 200 Called at the end of validation to aggregate outputs. 201 outputs: list of individual outputs of each validation step. 
202 """ 203 avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() 204 205 # calculate metrics and log classification report for Punctuation task 206 punct_precision, punct_recall, punct_f1, punct_report = self.punct_class_report.compute() 207 logging.info(f'Punctuation report: {punct_report}') 208 209 # calculate metrics and log classification report for Capitalization task 210 capit_precision, capit_recall, capit_f1, capit_report = self.capit_class_report.compute() 211 logging.info(f'Capitalization report: {capit_report}') 212 213 self.log('val_loss', avg_loss, prog_bar=True) 214 self.log('punct_precision', punct_precision) 215 self.log('punct_f1', punct_f1) 216 self.log('punct_recall', punct_recall) 217 self.log('capit_precision', capit_precision) 218 self.log('capit_f1', capit_f1) 219 self.log('capit_recall', capit_recall) 220 221 self.punct_class_report.reset() 222 self.capit_class_report.reset() 223 224 def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0): 225 """ 226 Called at the end of test to aggregate outputs. 227 outputs: list of individual outputs of each validation step. 
        """
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()

        # calculate metrics and log classification report for Punctuation task
        punct_precision, punct_recall, punct_f1, punct_report = self.punct_class_report.compute()
        logging.info(f'Punctuation report: {punct_report}')

        # calculate metrics and log classification report for Capitalization task
        capit_precision, capit_recall, capit_f1, capit_report = self.capit_class_report.compute()
        logging.info(f'Capitalization report: {capit_report}')

        self.log('test_loss', avg_loss, prog_bar=True)
        self.log('punct_precision', punct_precision)
        self.log('punct_f1', punct_f1)
        self.log('punct_recall', punct_recall)
        self.log('capit_precision', capit_precision)
        self.log('capit_f1', capit_f1)
        self.log('capit_recall', capit_recall)
        # NOTE(review): unlike multi_validation_epoch_end, the classification reports are not
        # reset here — confirm whether test metrics should also call .reset() at epoch end.

    def update_data_dir(self, data_dir: str) -> None:
        """
        Update data directory

        Args:
            data_dir: path to data directory

        Raises:
            ValueError: if ``data_dir`` does not exist on disk.
        """
        if os.path.exists(data_dir):
            logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
            self._cfg.dataset.data_dir = data_dir
        else:
            raise ValueError(f'{data_dir} not found')

    def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
        """Setup training data"""
        if train_data_config is None:
            train_data_config = self._cfg.train_ds

        # for older(pre - 1.0.0.b3) configs compatibility
        if not hasattr(self._cfg, "class_labels") or self._cfg.class_labels is None:
            # Temporarily relax struct mode so a new key can be added to the config.
            OmegaConf.set_struct(self._cfg, False)
            self._cfg.class_labels = {}
            self._cfg.class_labels = OmegaConf.create(
                {'punct_labels_file': 'punct_label_ids.csv', 'capit_labels_file': 'capit_label_ids.csv'}
            )

        self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config)

        # Register label-id files as artifacts on rank 0 only (or when not distributed).
        if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
            self.register_artifact('class_labels.punct_labels_file',
self._train_dl.dataset.punct_label_ids_file) 277 self.register_artifact('class_labels.capit_labels_file', self._train_dl.dataset.capit_label_ids_file) 278 279 # save label maps to the config 280 self._cfg.punct_label_ids = OmegaConf.create(self._train_dl.dataset.punct_label_ids) 281 self._cfg.capit_label_ids = OmegaConf.create(self._train_dl.dataset.capit_label_ids) 282 283 def setup_validation_data(self, val_data_config: Optional[Dict] = None): 284 """ 285 Setup validaton data 286 287 val_data_config: validation data config 288 """ 289 if val_data_config is None: 290 val_data_config = self._cfg.validation_ds 291 292 self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config) 293 294 def setup_test_data(self, test_data_config: Optional[Dict] = None): 295 if test_data_config is None: 296 test_data_config = self._cfg.test_ds 297 self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config) 298 299 def _setup_dataloader_from_config(self, cfg: DictConfig): 300 # use data_dir specified in the ds_item to run evaluation on multiple datasets 301 if 'ds_item' in cfg and cfg.ds_item is not None: 302 data_dir = cfg.ds_item 303 else: 304 data_dir = self._cfg.dataset.data_dir 305 306 text_file = os.path.join(data_dir, cfg.text_file) 307 label_file = os.path.join(data_dir, cfg.labels_file) 308 309 dataset = BertPunctuationCapitalizationDataset( 310 tokenizer=self.tokenizer, 311 text_file=text_file, 312 label_file=label_file, 313 pad_label=self._cfg.dataset.pad_label, 314 punct_label_ids=self._cfg.punct_label_ids, 315 capit_label_ids=self._cfg.capit_label_ids, 316 max_seq_length=self._cfg.dataset.max_seq_length, 317 ignore_extra_tokens=self._cfg.dataset.ignore_extra_tokens, 318 ignore_start_end=self._cfg.dataset.ignore_start_end, 319 use_cache=self._cfg.dataset.use_cache, 320 num_samples=cfg.num_samples, 321 punct_label_ids_file=self._cfg.class_labels.punct_labels_file 322 if 'class_labels' in self._cfg 323 else 'punct_label_ids.csv', 324 
capit_label_ids_file=self._cfg.class_labels.capit_labels_file 325 if 'class_labels' in self._cfg 326 else 'capit_label_ids.csv', 327 ) 328 329 return torch.utils.data.DataLoader( 330 dataset=dataset, 331 collate_fn=dataset.collate_fn, 332 batch_size=cfg.batch_size, 333 shuffle=cfg.shuffle, 334 num_workers=self._cfg.dataset.num_workers, 335 pin_memory=self._cfg.dataset.pin_memory, 336 drop_last=self._cfg.dataset.drop_last, 337 ) 338 339 def _setup_infer_dataloader( 340 self, queries: List[str], batch_size: int, max_seq_length: int, step: int, margin: int, 341 ) -> torch.utils.data.DataLoader: 342 """ 343 Setup function for a infer data loader. 344 345 Args: 346 model: a ``PunctuationCapitalizationModel`` instance for which data loader is created. 347 queries: lower cased text without punctuation 348 batch_size: batch size to use during inference 349 max_seq_length: length of segments into which queries are split. ``max_seq_length`` includes ``[CLS]`` and 350 ``[SEP]`` so every segment contains at most ``max_seq_length-2`` tokens from input a query. 351 step: number of tokens by which a segment is offset to a previous segment. Parameter ``step`` cannot be greater 352 than ``max_seq_length-2``. 353 margin: number of tokens near the edge of a segment which label probabilities are not used in final prediction 354 computation. 355 Returns: 356 A pytorch DataLoader. 
        """
        # Fall back to the dataset config for any segmentation parameter left unspecified.
        if max_seq_length is None:
            max_seq_length = self._cfg.dataset.max_seq_length
        if step is None:
            step = self._cfg.dataset.step
        if margin is None:
            margin = self._cfg.dataset.margin

        dataset = BertPunctuationCapitalizationInferDataset(
            tokenizer=self.tokenizer, queries=queries, max_seq_length=max_seq_length, step=step, margin=margin
        )
        # Inference loader: no shuffling, keep every segment (drop_last=False).
        return torch.utils.data.DataLoader(
            dataset=dataset,
            collate_fn=dataset.collate_fn,
            batch_size=batch_size,
            shuffle=False,
            num_workers=self._cfg.dataset.num_workers,
            pin_memory=self._cfg.dataset.pin_memory,
            drop_last=False,
        )

    @staticmethod
    def _remove_margins(tensor, margin_size, keep_left, keep_right):
        """Return a detached copy of ``tensor`` (1st dim) with ``margin_size`` elements plus the
        adjacent special token cut from each side whose ``keep_*`` flag is False.
        """
        tensor = tensor.detach().clone()
        if not keep_left:
            tensor = tensor[margin_size + 1 :]  # remove left margin and CLS token
        if not keep_right:
            tensor = tensor[: tensor.shape[0] - margin_size - 1]  # remove right margin and SEP token
        return tensor

    def _transform_logit_to_prob_and_remove_margins_and_extract_word_probs(
        self,
        punct_logits: torch.Tensor,
        capit_logits: torch.Tensor,
        subtokens_mask: torch.Tensor,
        start_word_ids: Tuple[int],
        margin: int,
        is_first: Tuple[bool],
        is_last: Tuple[bool],
    ) -> Tuple[List[np.ndarray], List[np.ndarray], List[int]]:
        """
        Applies softmax to get punctuation and capitalization probabilities, applies ``subtokens_mask`` to extract
        probabilities for words from probabilities for tokens, removes ``margin`` probabilities near edges of a segment.
        Left margin of the first segment in a query and right margin of the last segment in a query are not removed.
        Calculates new ``start_word_ids`` taking into the account the margins. If the left margin of a segment is removed
        corresponding start word index is increased by number of words (number of nonzero values in corresponding
        ``subtokens_mask``) in the margin.
404 Args: 405 punct_logits: a float tensor of shape ``[batch_size, segment_length, number_of_punctuation_labels]`` 406 capit_logits: a float tensor of shape ``[batch_size, segment_length, number_of_capitalization_labels]`` 407 subtokens_mask: a float tensor of shape ``[batch_size, segment_length]`` 408 start_word_ids: indices of segment first words in a query 409 margin: number of tokens near edges of a segment which probabilities are discarded 410 is_first: is segment the first segment in a query 411 is_last: is segment the last segment in a query 412 Returns: 413 b_punct_probs: list containing ``batch_size`` numpy arrays. The numpy arrays have shapes 414 ``[number_of_word_in_this_segment, number_of_punctuation_labels]``. Word punctuation probabilities for 415 segments in the batch. 416 b_capit_probs: list containing ``batch_size`` numpy arrays. The numpy arrays have shapes 417 ``[number_of_word_in_this_segment, number_of_capitalization_labels]``. Word capitalization probabilities for 418 segments in the batch. 
419 new_start_word_ids: indices of segment first words in a query after margin removal 420 """ 421 new_start_word_ids = list(start_word_ids) 422 subtokens_mask = subtokens_mask > 0.5 423 b_punct_probs, b_capit_probs = [], [] 424 for i, (first, last, pl, cl, stm) in enumerate( 425 zip(is_first, is_last, punct_logits, capit_logits, subtokens_mask) 426 ): 427 if not first: 428 new_start_word_ids[i] += torch.count_nonzero(stm[: margin + 1]).numpy() # + 1 is for [CLS] token 429 stm = self._remove_margins(stm, margin, keep_left=first, keep_right=last) 430 for b_probs, logits in [(b_punct_probs, pl), (b_capit_probs, cl)]: 431 p = torch.nn.functional.softmax( 432 self._remove_margins(logits, margin, keep_left=first, keep_right=last)[stm], dim=-1, 433 ) 434 b_probs.append(p.detach().cpu().numpy()) 435 return b_punct_probs, b_capit_probs, new_start_word_ids 436 437 @staticmethod 438 def _move_acc_probs_to_token_preds( 439 pred: List[int], acc_prob: np.ndarray, number_of_probs_to_move: int 440 ) -> Tuple[List[int], np.ndarray]: 441 """ 442 ``number_of_probs_to_move`` rows in the beginning are removed from ``acc_prob``. From every remove row the label 443 with the largest probability is selected and appended to ``pred``. 444 Args: 445 pred: list with ready label indices for a query 446 acc_prob: numpy array of shape ``[number_of_words_for_which_probabilities_are_accumulated, number_of_labels]`` 447 number_of_probs_to_move: int 448 Returns: 449 pred: list with ready label indices for a query 450 acc_prob: numpy array of shape 451 ``[number_of_words_for_which_probabilities_are_accumulated - number_of_probs_to_move, number_of_labels]`` 452 """ 453 if number_of_probs_to_move > acc_prob.shape[0]: 454 raise ValueError( 455 f"Not enough accumulated probabilities. 
Number_of_probs_to_move={number_of_probs_to_move} " 456 f"acc_prob.shape={acc_prob.shape}" 457 ) 458 if number_of_probs_to_move > 0: 459 pred = pred + list(np.argmax(acc_prob[:number_of_probs_to_move], axis=-1)) 460 acc_prob = acc_prob[number_of_probs_to_move:] 461 return pred, acc_prob 462 463 @staticmethod 464 def _update_accumulated_probabilities(acc_prob: np.ndarray, update: np.ndarray) -> np.ndarray: 465 """ 466 Args: 467 acc_prob: numpy array of shape ``[A, L]`` 468 update: numpy array of shape ``[A + N, L]`` 469 Returns: 470 numpy array of shape ``[A + N, L]`` 471 """ 472 acc_prob = np.concatenate([acc_prob * update[: acc_prob.shape[0]], update[acc_prob.shape[0] :]], axis=0) 473 return acc_prob 474 475 def apply_punct_capit_predictions(self, query: str, punct_preds: List[int], capit_preds: List[int]) -> str: 476 """ 477 Restores punctuation and capitalization in ``query``. 478 Args: 479 query: a string without punctuation and capitalization 480 punct_preds: ids of predicted punctuation labels 481 capit_preds: ids of predicted capitalization labels 482 Returns: 483 a query with restored punctuation and capitalization 484 """ 485 query = query.strip().split() 486 assert len(query) == len( 487 punct_preds 488 ), f"len(query)={len(query)} len(punct_preds)={len(punct_preds)}, query[:30]={query[:30]}" 489 assert len(query) == len( 490 capit_preds 491 ), f"len(query)={len(query)} len(capit_preds)={len(capit_preds)}, query[:30]={query[:30]}" 492 punct_ids_to_labels = {v: k for k, v in self._cfg.punct_label_ids.items()} 493 capit_ids_to_labels = {v: k for k, v in self._cfg.capit_label_ids.items()} 494 query_with_punct_and_capit = '' 495 for j, word in enumerate(query): 496 punct_label = punct_ids_to_labels[punct_preds[j]] 497 capit_label = capit_ids_to_labels[capit_preds[j]] 498 499 if capit_label != self._cfg.dataset.pad_label: 500 word = word.capitalize() 501 query_with_punct_and_capit += word 502 if punct_label != self._cfg.dataset.pad_label: 503 
query_with_punct_and_capit += punct_label 504 query_with_punct_and_capit += ' ' 505 return query_with_punct_and_capit[:-1] 506 507 def get_labels(self, punct_preds: List[int], capit_preds: List[int]) -> str: 508 """ 509 Returns punctuation and capitalization labels in NeMo format (see https://docs.nvidia.com/deeplearning/nemo/ 510 user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format). 511 Args: 512 punct_preds: ids of predicted punctuation labels 513 capit_preds: ids of predicted capitalization labels 514 Returns: 515 labels in NeMo format 516 """ 517 assert len(capit_preds) == len( 518 punct_preds 519 ), f"len(capit_preds)={len(capit_preds)} len(punct_preds)={len(punct_preds)}" 520 punct_ids_to_labels = {v: k for k, v in self._cfg.punct_label_ids.items()} 521 capit_ids_to_labels = {v: k for k, v in self._cfg.capit_label_ids.items()} 522 result = '' 523 for capit_label, punct_label in zip(capit_preds, punct_preds): 524 punct_label = punct_ids_to_labels[punct_label] 525 capit_label = capit_ids_to_labels[capit_label] 526 result += punct_label + capit_label + ' ' 527 return result[:-1] 528 529 def add_punctuation_capitalization( 530 self, 531 queries: List[str], 532 batch_size: int = None, 533 max_seq_length: int = 64, 534 step: int = 8, 535 margin: int = 16, 536 return_labels: bool = False, 537 ) -> List[str]: 538 """ 539 Adds punctuation and capitalization to the queries. Use this method for inference. 540 541 Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments 542 which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization 543 including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift 544 between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near 545 borders of segments which have only one side context. 
546 547 If segments overlap, probabilities of overlapping predictions are multiplied and then the label with 548 corresponding to the maximum probability is selected. 549 550 Args: 551 queries: lower cased text without punctuation 552 batch_size: batch size to use during inference 553 max_seq_length: maximum sequence length of segment after tokenization. 554 step: relative shift of consequent segments into which long queries are split. Long queries are split into 555 segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are 556 tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is 557 tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. 558 margin: number of subtokens in the beginning and the end of segments which are not used for prediction 559 computation. The first segment does not have left margin and the last segment does not have right 560 margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, 561 ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments 562 ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], 563 ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions 564 computation, margins are removed. In the next list, subtokens which logits are not used for final 565 predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], 566 ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. 567 return_labels: whether to return labels in NeMo format (see https://docs.nvidia.com/deeplearning/nemo/ 568 user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format) instead of queries 569 with restored punctuation and capitalization. 
570 Returns: 571 result: text with added capitalization and punctuation or punctuation and capitalization labels 572 """ 573 if len(queries) == 0: 574 return [] 575 if batch_size is None: 576 batch_size = len(queries) 577 logging.info(f'Using batch size {batch_size} for inference') 578 result: List[str] = [] 579 mode = self.training 580 try: 581 self.eval() 582 infer_datalayer = self._setup_infer_dataloader(queries, batch_size, max_seq_length, step, margin) 583 # Predicted labels for queries. List of labels for every query 584 all_punct_preds: List[List[int]] = [[] for _ in queries] 585 all_capit_preds: List[List[int]] = [[] for _ in queries] 586 # Accumulated probabilities (or product of probabilities acquired from different segments) of punctuation 587 # and capitalization. Probabilities for words in a query are extracted using `subtokens_mask`. Probabilities 588 # for newly processed words are appended to the accumulated probabilities. If probabilities for a word are 589 # already present in `acc_probs`, old probabilities are replaced with a product of old probabilities 590 # and probabilities acquired from new segment. Segments are processed in an order they appear in an 591 # input query. When all segments with a word are processed, a label with the highest probability 592 # (or product of probabilities) is chosen and appended to an appropriate list in `all_preds`. After adding 593 # prediction to `all_preds`, probabilities for a word are removed from `acc_probs`. 
594 acc_punct_probs: List[Optional[np.ndarray]] = [None for _ in queries] 595 acc_capit_probs: List[Optional[np.ndarray]] = [None for _ in queries] 596 d = self.device 597 for batch_i, batch in tqdm( 598 enumerate(infer_datalayer), total=ceil(len(infer_datalayer.dataset) / batch_size), unit="batch" 599 ): 600 inp_ids, inp_type_ids, inp_mask, subtokens_mask, start_word_ids, query_ids, is_first, is_last = batch 601 punct_logits, capit_logits = self.forward( 602 input_ids=inp_ids.to(d), token_type_ids=inp_type_ids.to(d), attention_mask=inp_mask.to(d), 603 ) 604 _res = self._transform_logit_to_prob_and_remove_margins_and_extract_word_probs( 605 punct_logits, capit_logits, subtokens_mask, start_word_ids, margin, is_first, is_last 606 ) 607 punct_probs, capit_probs, start_word_ids = _res 608 for i, (q_i, start_word_id, bpp_i, bcp_i) in enumerate( 609 zip(query_ids, start_word_ids, punct_probs, capit_probs) 610 ): 611 for all_preds, acc_probs, b_probs_i in [ 612 (all_punct_preds, acc_punct_probs, bpp_i), 613 (all_capit_preds, acc_capit_probs, bcp_i), 614 ]: 615 if acc_probs[q_i] is None: 616 acc_probs[q_i] = b_probs_i 617 else: 618 all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds( 619 all_preds[q_i], acc_probs[q_i], start_word_id - len(all_preds[q_i]), 620 ) 621 acc_probs[q_i] = self._update_accumulated_probabilities(acc_probs[q_i], b_probs_i) 622 for all_preds, acc_probs in [(all_punct_preds, acc_punct_probs), (all_capit_preds, acc_capit_probs)]: 623 for q_i, (pred, prob) in enumerate(zip(all_preds, acc_probs)): 624 if prob is not None: 625 all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(pred, prob, len(prob)) 626 for i, query in enumerate(queries): 627 result.append( 628 self.get_labels(all_punct_preds[i], all_capit_preds[i]) 629 if return_labels 630 else self.apply_punct_capit_predictions(query, all_punct_preds[i], all_capit_preds[i]) 631 ) 632 finally: 633 # set mode back to its original value 634 self.train(mode=mode) 635 
return result 636 637 @classmethod 638 def list_available_models(cls) -> Optional[Dict[str, str]]: 639 """ 640 This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud. 641 642 Returns: 643 List of available pre-trained models. 644 """ 645 result = [] 646 result.append( 647 PretrainedModelInfo( 648 pretrained_model_name="punctuation_en_bert", 649 location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_bert/versions/1.0.0rc1/files/punctuation_en_bert.nemo", 650 description="The model was trained with NeMo BERT base uncased checkpoint on a subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.", 651 ) 652 ) 653 result.append( 654 PretrainedModelInfo( 655 pretrained_model_name="punctuation_en_distilbert", 656 location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_distilbert/versions/1.0.0rc1/files/punctuation_en_distilbert.nemo", 657 description="The model was trained with DiltilBERT base uncased checkpoint from HuggingFace on a subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.", 658 ) 659 ) 660 return result 661 662 @property 663 def input_module(self): 664 return self.bert_model 665 666 @property 667 def output_module(self): 668 return self 669 [end of nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py] [start of nemo/collections/nlp/modules/common/tokenizer_utils.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os.path 16 from dataclasses import MISSING, dataclass 17 from os import path 18 from typing import Dict, List, Optional 19 20 import nemo 21 from nemo.collections.common.tokenizers.bytelevel_tokenizers import ByteLevelTokenizer 22 from nemo.collections.common.tokenizers.char_tokenizer import CharTokenizer 23 from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer 24 from nemo.collections.common.tokenizers.word_tokenizer import WordTokenizer 25 from nemo.collections.common.tokenizers.youtokentome_tokenizer import YouTokenToMeTokenizer 26 from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list 27 from nemo.collections.nlp.modules.common.lm_utils import get_pretrained_lm_models_list 28 from nemo.collections.nlp.parts.nlp_overrides import HAVE_APEX 29 from nemo.utils import logging 30 31 try: 32 from nemo.collections.nlp.modules.common.megatron.megatron_utils import get_megatron_tokenizer 33 34 HAVE_APEX = True 35 36 except (ImportError, ModuleNotFoundError): 37 HAVE_APEX = False 38 39 40 __all__ = ['get_tokenizer', 'get_tokenizer_list'] 41 42 43 megatron_tokenizer_model_map = { 44 'BertWordPieceLowerCase': 'megatron-bert-345m-uncased', 45 'BertWordPieceCase': 'megatron-bert-345m-cased', 46 'GPT2BPETokenizer': 'megatron-gpt-345m', 47 } 48 49 50 def get_tokenizer_list() -> List[str]: 51 """ 52 Returns all all supported tokenizer names 53 """ 54 s = set(get_pretrained_lm_models_list()) 55 
s.update(set(get_huggingface_pretrained_lm_models_list(include_external=True))) 56 return ["sentencepiece", "char", "word"] + list(s) 57 58 59 @dataclass 60 class TokenizerConfig: 61 library: str = MISSING 62 tokenizer_model: Optional[str] = None 63 vocab_size: Optional[int] = None 64 vocab_file: Optional[str] = None 65 special_tokens: Optional[Dict[str, str]] = None 66 bpe_dropout: Optional[float] = 0.0 67 coverage: Optional[float] = 0.999 68 training_sample_size: Optional[int] = None 69 r2l: Optional[bool] = False 70 71 72 def get_tokenizer( 73 tokenizer_name: str, 74 tokenizer_model: Optional[str] = None, 75 vocab_file: Optional[str] = None, 76 merges_file: Optional[str] = None, 77 special_tokens: Optional[Dict[str, str]] = None, 78 use_fast: Optional[bool] = False, 79 bpe_dropout: Optional[float] = 0.0, 80 ): 81 """ 82 Args: 83 tokenizer_name: sentencepiece or pretrained model from the hugging face list, 84 for example: bert-base-cased 85 To see the list of all HuggingFace pretrained models, use: nemo_nlp.modules.common.get_huggingface_pretrained_lm_models_list() 86 tokenizer_model: tokenizer model file of sentencepiece or youtokentome 87 special_tokens: dict of special tokens 88 vocab_file: path to vocab file 89 use_fast: (only for HuggingFace AutoTokenizer) set to True to use fast HuggingFace tokenizer 90 bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation procedure of BPE to help 91 model better learn word compositionality and become robust to segmentation errors. 92 It has emperically been shown to improve inference time BLEU scores. 
93 """ 94 if special_tokens is None: 95 special_tokens_dict = {} 96 else: 97 special_tokens_dict = special_tokens 98 99 if 'megatron' in tokenizer_name: 100 if not HAVE_APEX: 101 raise RuntimeError("Apex required to use megatron.") 102 if vocab_file is None: 103 vocab_file = nemo.collections.nlp.modules.common.megatron.megatron_utils.get_megatron_vocab_file( 104 tokenizer_name 105 ) 106 merges_file = nemo.collections.nlp.modules.common.megatron.megatron_utils.get_megatron_merges_file( 107 tokenizer_name 108 ) 109 tokenizer_name = get_megatron_tokenizer(tokenizer_name) 110 111 if tokenizer_name == 'sentencepiece': 112 return nemo.collections.common.tokenizers.sentencepiece_tokenizer.SentencePieceTokenizer( 113 model_path=tokenizer_model, special_tokens=special_tokens, legacy=True 114 ) 115 elif tokenizer_name == 'yttm': 116 return YouTokenToMeTokenizer(model_path=tokenizer_model, bpe_dropout=bpe_dropout) 117 elif tokenizer_name == 'word': 118 return WordTokenizer(vocab_file=vocab_file, **special_tokens_dict) 119 elif tokenizer_name == 'char': 120 return CharTokenizer(vocab_file=vocab_file, **special_tokens_dict) 121 122 logging.info( 123 f"Getting HuggingFace AutoTokenizer with pretrained_model_name: {tokenizer_name}, vocab_file: {vocab_file}, special_tokens_dict: {special_tokens_dict}, and use_fast: {use_fast}" 124 ) 125 return AutoTokenizer( 126 pretrained_model_name=tokenizer_name, 127 vocab_file=vocab_file, 128 merges_file=merges_file, 129 **special_tokens_dict, 130 use_fast=use_fast, 131 ) 132 133 134 def get_nmt_tokenizer( 135 library: str = 'yttm', 136 model_name: Optional[str] = None, 137 tokenizer_model: Optional[str] = None, 138 vocab_file: Optional[str] = None, 139 merges_file: Optional[str] = None, 140 special_tokens: Optional[Dict[str, str]] = None, 141 use_fast: Optional[bool] = False, 142 bpe_dropout: Optional[float] = 0.0, 143 r2l: Optional[bool] = False, 144 ): 145 """ 146 Args: 147 model_name: if using a pretrained model from NeMo, HuggingFace, or 
Megatron 148 tokenizer_model: tokenizer model file of sentencepiece or youtokentome 149 special_tokens: dict of special tokens 150 vocab_file: path to vocab file 151 use_fast: (only for HuggingFace AutoTokenizer) set to True to use fast HuggingFace tokenizer 152 bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation procedure 153 of BPE to help model better learn word compositionality and become robust to segmentation errors. 154 It has empirically been shown to improve inference time BLEU scores. 155 r2l: Whether to return subword IDs from right to left 156 """ 157 if special_tokens is None: 158 special_tokens_dict = {} 159 else: 160 special_tokens_dict = special_tokens 161 162 if library == 'yttm': 163 logging.info(f'Getting YouTokenToMeTokenizer with model: {tokenizer_model} with r2l: {r2l}.') 164 return YouTokenToMeTokenizer(model_path=tokenizer_model, bpe_dropout=bpe_dropout, r2l=r2l) 165 elif library == 'huggingface': 166 logging.info(f'Getting HuggingFace AutoTokenizer with pretrained_model_name: {model_name}') 167 return AutoTokenizer( 168 pretrained_model_name=model_name, 169 vocab_file=vocab_file, 170 merges_file=merges_file, 171 **special_tokens_dict, 172 use_fast=use_fast, 173 ) 174 elif library == 'sentencepiece': 175 logging.info(f'Getting SentencePiece with model: {tokenizer_model}') 176 return nemo.collections.common.tokenizers.sentencepiece_tokenizer.SentencePieceTokenizer( 177 model_path=tokenizer_model, special_tokens=special_tokens_dict 178 ) 179 elif library == 'byte-level': 180 logging.info(f'Using byte-level tokenization') 181 return ByteLevelTokenizer() 182 elif library == 'megatron': 183 if model_name in megatron_tokenizer_model_map: 184 model_name = megatron_tokenizer_model_map[model_name] 185 logging.info( 186 f'Getting Megatron tokenizer for pretrained model name: {model_name} and custom vocab file: {vocab_file}' 187 ) 188 return get_tokenizer(tokenizer_name=model_name, 
vocab_file=vocab_file, merges_file=merges_file) 189 else: 190 raise NotImplementedError( 191 'Currently we only support "yttm", "huggingface", "sentencepiece", "megatron", and "byte-level" tokenizer' 192 'libraries.' 193 ) 194 [end of nemo/collections/nlp/modules/common/tokenizer_utils.py] [start of nemo/core/connectors/save_restore_connector.py] 1 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 2 # Copyright 2015 and onwards Google, Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import os 17 import shutil 18 import tarfile 19 import tempfile 20 import uuid 21 from typing import Optional, Union 22 23 import torch 24 from omegaconf import DictConfig, OmegaConf 25 from omegaconf.omegaconf import open_dict 26 from pytorch_lightning.trainer.trainer import Trainer 27 28 from nemo.utils import logging, model_utils 29 from nemo.utils.app_state import AppState 30 from nemo.utils.get_rank import is_global_rank_zero 31 32 33 class SaveRestoreConnector: 34 def __init__(self) -> None: 35 self._model_config_yaml = "model_config.yaml" 36 self._model_weights_ckpt = "model_weights.ckpt" 37 38 def save_to(self, model, save_path: str): 39 """ 40 Saves model instance (weights and configuration) into .nemo file. 41 You can use "restore_from" method to fully restore instance from .nemo file. 42 43 .nemo file is an archive (tar.gz) with the following: 44 model_config.yaml - model configuration in .yaml format. 
You can deserialize this into cfg argument for model's constructor 45 model_wights.chpt - model checkpoint 46 47 Args: 48 model: ModelPT object to be saved. 49 save_path: Path to .nemo file where model instance should be saved 50 """ 51 52 if is_global_rank_zero(): 53 with tempfile.TemporaryDirectory() as tmpdir: 54 config_yaml = os.path.join(tmpdir, self.model_config_yaml) 55 model_weights = os.path.join(tmpdir, self.model_weights_ckpt) 56 model.to_config_file(path2yaml_file=config_yaml) 57 if hasattr(model, 'artifacts') and model.artifacts is not None: 58 self._handle_artifacts(model, nemo_file_folder=tmpdir) 59 # We should not update self._cfg here - the model can still be in use 60 self._update_artifact_paths(model, path2yaml_file=config_yaml) 61 self._save_state_dict_to_disk(model.state_dict(), model_weights) 62 self._make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir) 63 else: 64 return 65 66 def restore_from( 67 self, 68 calling_cls, 69 restore_path: str, 70 override_config_path: Optional[Union[OmegaConf, str]] = None, 71 map_location: Optional[torch.device] = None, 72 strict: bool = True, 73 return_config: bool = False, 74 trainer: Trainer = None, 75 ): 76 """ 77 Restores model instance (weights and configuration) into .nemo file 78 79 Args: 80 restore_path: path to .nemo file from which model should be instantiated 81 override_config_path: path to a yaml config that will override the internal 82 config file or an OmegaConf / DictConfig object representing the model config. 83 map_location: Optional torch.device() to map the instantiated model to a device. 84 By default (None), it will select a GPU if available, falling back to CPU otherwise. 85 strict: Passed to load_state_dict. By default True 86 return_config: If set to true, will return just the underlying config of the restored 87 model as an OmegaConf DictConfig object without instantiating the model. 
88 89 Example: 90 ``` 91 model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo') 92 assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel) 93 ``` 94 95 Returns: 96 An instance of type cls or its underlying config (if return_config is set). 97 """ 98 # Get path where the command is executed - the artifacts will be "retrieved" there 99 # (original .nemo behavior) 100 cwd = os.getcwd() 101 102 if map_location is None: 103 if torch.cuda.is_available(): 104 map_location = torch.device('cuda') 105 else: 106 map_location = torch.device('cpu') 107 108 with tempfile.TemporaryDirectory() as tmpdir: 109 try: 110 self._unpack_nemo_file(path2file=restore_path, out_folder=tmpdir) 111 os.chdir(tmpdir) 112 if override_config_path is None: 113 config_yaml = os.path.join(tmpdir, self.model_config_yaml) 114 else: 115 # can be str path or OmegaConf / DictConfig object 116 config_yaml = override_config_path 117 if not isinstance(config_yaml, (OmegaConf, DictConfig)): 118 conf = OmegaConf.load(config_yaml) 119 else: 120 conf = config_yaml 121 if override_config_path is not None: 122 # Resolve the override config 123 conf = OmegaConf.to_container(conf, resolve=True) 124 conf = OmegaConf.create(conf) 125 # If override is top level config, extract just `model` from it 126 if 'model' in conf: 127 conf = conf.model 128 129 if return_config: 130 instance = conf 131 return instance 132 else: 133 app_state = AppState() 134 if app_state.model_parallel_rank is not None and app_state.model_parallel_size > 1: 135 model_weights = self._inject_model_parallel_rank_for_ckpt(tmpdir, self.model_weights_ckpt) 136 else: 137 model_weights = os.path.join(tmpdir, self.model_weights_ckpt) 138 OmegaConf.set_struct(conf, True) 139 os.chdir(cwd) 140 # get the class 141 calling_cls._set_model_restore_state(is_being_restored=True, folder=tmpdir) 142 instance = calling_cls.from_config_dict(config=conf, trainer=trainer) 143 instance = instance.to(map_location) 144 # add 
load_state_dict override 145 instance.load_state_dict( 146 self._load_state_dict_from_disk(model_weights, map_location=map_location), strict=strict 147 ) 148 149 logging.info(f'Model {instance.__class__.__name__} was successfully restored from {restore_path}.') 150 instance._set_model_restore_state(is_being_restored=False) 151 finally: 152 os.chdir(cwd) 153 154 return instance 155 156 def extract_state_dict_from(self, restore_path: str, save_dir: str, split_by_module: bool = False): 157 """ 158 Extract the state dict(s) from a provided .nemo tarfile and save it to a directory. 159 160 Args: 161 restore_path: path to .nemo file from which state dict(s) should be extracted 162 save_dir: directory in which the saved state dict(s) should be stored 163 split_by_module: bool flag, which determins whether the output checkpoint should 164 be for the entire Model, or the individual module's that comprise the Model 165 166 Example: 167 To convert the .nemo tarfile into a single Model level PyTorch checkpoint 168 :: 169 state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts') 170 171 172 To restore a model from a Model level checkpoint 173 :: 174 model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration 175 model.load_state_dict(torch.load("./asr_ckpts/model_weights.ckpt")) 176 177 178 To convert the .nemo tarfile into multiple Module level PyTorch checkpoints 179 :: 180 state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts', split_by_module=True) 181 182 183 To restore a module from a Module level checkpoint 184 :: 185 model = nemo.collections.asr.models.EncDecCTCModel(cfg) # or any other method of restoration 186 187 # load the individual components 188 model.preprocessor.load_state_dict(torch.load("./asr_ckpts/preprocessor.ckpt")) 189 model.encoder.load_state_dict(torch.load("./asr_ckpts/encoder.ckpt")) 190 
model.decoder.load_state_dict(torch.load("./asr_ckpts/decoder.ckpt")) 191 192 193 Returns: 194 The state dict that was loaded from the original .nemo checkpoint 195 """ 196 197 cwd = os.getcwd() 198 199 save_dir = os.path.abspath(save_dir) 200 if not os.path.exists(save_dir): 201 os.makedirs(save_dir, exist_ok=True) 202 203 with tempfile.TemporaryDirectory() as tmpdir: 204 try: 205 self._unpack_nemo_file(path2file=restore_path, out_folder=tmpdir) 206 os.chdir(tmpdir) 207 model_weights = os.path.join(tmpdir, self.model_weights_ckpt) 208 state_dict = self._load_state_dict_from_disk(model_weights) 209 210 if not split_by_module: 211 filepath = os.path.join(save_dir, self.model_weights_ckpt) 212 self._save_state_dict_to_disk(state_dict, filepath) 213 214 else: 215 key_set = set([key.split(".")[0] for key in state_dict.keys()]) 216 for primary_key in key_set: 217 inner_keys = [key for key in state_dict.keys() if key.split(".")[0] == primary_key] 218 state_dict_subset = { 219 ".".join(inner_key.split(".")[1:]): state_dict[inner_key] for inner_key in inner_keys 220 } 221 filepath = os.path.join(save_dir, f"{primary_key}.ckpt") 222 self._save_state_dict_to_disk(state_dict_subset, filepath) 223 224 logging.info(f'Checkpoints from {restore_path} were successfully extracted into {save_dir}.') 225 finally: 226 os.chdir(cwd) 227 228 return state_dict 229 230 def register_artifact(self, model, config_path: str, src: str, verify_src_exists: bool = True): 231 """ Register model artifacts with this function. These artifacts (files) will be included inside .nemo file 232 when model.save_to("mymodel.nemo") is called. 233 234 How it works: 235 1. It always returns existing absolute path which can be used during Model constructor call 236 EXCEPTION: src is None or "" in which case nothing will be done and src will be returned 237 2. 
It will add (config_path, model_utils.ArtifactItem()) pair to self.artifacts 238 239 If "src" is local existing path, then it will be returned in absolute path form. 240 elif "src" starts with "nemo_file:unique_artifact_name": 241 .nemo will be untarred to a temporary folder location and an actual existing path will be returned 242 else an error will be raised. 243 244 WARNING: use .register_artifact calls in your models' constructors. 245 The returned path is not guaranteed to exist after you have exited your model's constuctor. 246 247 Args: 248 model: ModelPT object to register artifact for. 249 config_path (str): Artifact key. Usually corresponds to the model config. 250 src (str): Path to artifact. 251 verify_src_exists (bool): If set to False, then the artifact is optional and register_artifact will return None even if 252 src is not found. Defaults to True. 253 254 Returns: 255 str: If src is not None or empty it always returns absolute path which is guaranteed to exists during model instnce life 256 """ 257 app_state = AppState() 258 259 artifact_item = model_utils.ArtifactItem() 260 261 # This is for backward compatibility, if the src objects exists simply inside of the tarfile 262 # without its key having been overriden, this pathway will be used. 
263 src_obj_name = os.path.basename(src) 264 if app_state.nemo_file_folder is not None: 265 src_obj_path = os.path.abspath(os.path.join(app_state.nemo_file_folder, src_obj_name)) 266 else: 267 src_obj_path = src_obj_name 268 269 # src is a local existing path - register artifact and return exact same path for usage by the model 270 if os.path.exists(os.path.abspath(src)): 271 return_path = os.path.abspath(src) 272 artifact_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH 273 274 # this is the case when artifact must be retried from the nemo file 275 # we are assuming that the location of the right nemo file is available from _MODEL_RESTORE_PATH 276 elif src.startswith("nemo:"): 277 return_path = os.path.abspath(os.path.join(app_state.nemo_file_folder, src[5:])) 278 artifact_item.path_type = model_utils.ArtifactPathType.TAR_PATH 279 280 # backward compatibility implementation 281 elif os.path.exists(src_obj_path): 282 return_path = src_obj_path 283 artifact_item.path_type = model_utils.ArtifactPathType.TAR_PATH 284 else: 285 if verify_src_exists: 286 raise FileNotFoundError( 287 f"src path does not exist or it is not a path in nemo file. src value I got was: {src}. 
Absolute: {os.path.abspath(src)}" 288 ) 289 else: 290 # artifact is optional and we simply return None 291 return None 292 293 assert os.path.exists(return_path) 294 295 artifact_item.path = os.path.abspath(src) 296 model.artifacts[config_path] = artifact_item 297 # we were called by ModelPT 298 if hasattr(model, "cfg"): 299 with open_dict(model._cfg): 300 OmegaConf.update(model.cfg, config_path, return_path) 301 return return_path 302 303 def _handle_artifacts(self, model, nemo_file_folder): 304 tarfile_artifacts = [] 305 app_state = AppState() 306 for conf_path, artiitem in model.artifacts.items(): 307 if artiitem.path_type == model_utils.ArtifactPathType.LOCAL_PATH: 308 if not os.path.exists(artiitem.path): 309 raise FileNotFoundError(f"Artifact {conf_path} not found at location: {artiitem.path}") 310 311 # Generate new uniq artifact name and copy it to nemo_file_folder 312 # Note uuid.uuid4().hex is guaranteed to be 32 character long 313 artifact_base_name = os.path.basename(artiitem.path) 314 artifact_uniq_name = f"{uuid.uuid4().hex}_{artifact_base_name}" 315 shutil.copy2(artiitem.path, os.path.join(nemo_file_folder, artifact_uniq_name)) 316 317 # Update artifacts registry 318 artiitem.hashed_path = "nemo:" + artifact_uniq_name 319 model.artifacts[conf_path] = artiitem 320 321 elif artiitem.path_type == model_utils.ArtifactPathType.TAR_PATH: 322 # process all tarfile artifacts in one go, so preserve key-value pair 323 tarfile_artifacts.append((conf_path, artiitem)) 324 325 else: 326 raise ValueError(f"Directly referencing artifacts from other nemo files isn't supported yet") 327 328 # Process current tarfile artifacts by unpacking the previous tarfile and extract the artifacts 329 # that are currently required. 
330 model_metadata = app_state.get_model_metadata_from_guid(model.model_guid) 331 if len(tarfile_artifacts) > 0 and model_metadata.restoration_path is not None: 332 # Need to step into nemo archive to extract file 333 # Get path where the command is executed - the artifacts will be "retrieved" there 334 # (original .nemo behavior) 335 cwd = os.getcwd() 336 try: 337 # Step into the nemo archive to try and find the file 338 with tempfile.TemporaryDirectory() as archive_dir: 339 self._unpack_nemo_file(path2file=model_metadata.restoration_path, out_folder=archive_dir) 340 os.chdir(archive_dir) 341 for conf_path, artiitem in tarfile_artifacts: 342 # Get basename and copy it to nemo_file_folder 343 if 'nemo:' in artiitem.path: 344 artifact_base_name = artiitem.path.split('nemo:')[1] 345 else: 346 artifact_base_name = os.path.basename(artiitem.path) 347 # no need to hash here as we are in tarfile_artifacts which are already hashed 348 artifact_uniq_name = artifact_base_name 349 shutil.copy2(artifact_base_name, os.path.join(nemo_file_folder, artifact_uniq_name)) 350 351 # Update artifacts registry 352 new_artiitem = model_utils.ArtifactItem() 353 new_artiitem.path = "nemo:" + artifact_uniq_name 354 new_artiitem.path_type = model_utils.ArtifactPathType.TAR_PATH 355 model.artifacts[conf_path] = new_artiitem 356 finally: 357 # change back working directory 358 os.chdir(cwd) 359 360 def _update_artifact_paths(self, model, path2yaml_file): 361 if model.artifacts is not None and len(model.artifacts) > 0: 362 conf = OmegaConf.load(path2yaml_file) 363 for conf_path, item in model.artifacts.items(): 364 if item.hashed_path is None: 365 OmegaConf.update(conf, conf_path, item.path) 366 else: 367 OmegaConf.update(conf, conf_path, item.hashed_path) 368 with open(path2yaml_file, 'w') as fout: 369 OmegaConf.save(config=conf, f=fout, resolve=True) 370 371 def _inject_model_parallel_rank_for_ckpt(self, dirname, basename): 372 app_state = AppState() 373 model_weights = os.path.join(dirname, 
f'mp_rank_{app_state.model_parallel_rank:02}', basename) 374 return model_weights 375 376 @staticmethod 377 def _make_nemo_file_from_folder(filename, source_dir): 378 dirname = os.path.dirname(filename) 379 os.makedirs(dirname, exist_ok=True) 380 with tarfile.open(filename, "w:gz") as tar: 381 tar.add(source_dir, arcname=".") 382 383 @staticmethod 384 def _unpack_nemo_file(path2file: str, out_folder: str) -> str: 385 if not os.path.exists(path2file): 386 raise FileNotFoundError(f"{path2file} does not exist") 387 tar = tarfile.open(path2file, "r:gz") 388 tar.extractall(path=out_folder) 389 tar.close() 390 return out_folder 391 392 @staticmethod 393 def _save_state_dict_to_disk(state_dict, filepath): 394 torch.save(state_dict, filepath) 395 396 @staticmethod 397 def _load_state_dict_from_disk(model_weights, map_location=None): 398 return torch.load(model_weights, map_location=map_location) 399 400 @property 401 def model_config_yaml(self) -> str: 402 return self._model_config_yaml 403 404 @model_config_yaml.setter 405 def model_config_yaml(self, path: str): 406 self._model_config_yaml = path 407 408 @property 409 def model_weights_ckpt(self) -> str: 410 return self._model_weights_ckpt 411 412 @model_weights_ckpt.setter 413 def model_weights_ckpt(self, path: str): 414 self._model_weights_ckpt = path 415 [end of nemo/core/connectors/save_restore_connector.py] [start of nemo/utils/config_utils.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import copy 16 import inspect 17 from dataclasses import is_dataclass 18 from typing import Dict, List, Optional 19 20 from nemo.utils import logging 21 22 # TODO @blisc: Perhaps refactor instead of import guarding 23 _HAS_HYDRA = True 24 try: 25 from omegaconf import DictConfig, OmegaConf, open_dict 26 except ModuleNotFoundError: 27 _HAS_HYDRA = False 28 29 30 def update_model_config( 31 model_cls: 'nemo.core.config.modelPT.NemoConfig', update_cfg: 'DictConfig', drop_missing_subconfigs: bool = True 32 ): 33 """ 34 Helper class that updates the default values of a ModelPT config class with the values 35 in a DictConfig that mirrors the structure of the config class. 36 37 Assumes the `update_cfg` is a DictConfig (either generated manually, via hydra or instantiated via yaml/model.cfg). 38 This update_cfg is then used to override the default values preset inside the ModelPT config class. 39 40 If `drop_missing_subconfigs` is set, the certain sub-configs of the ModelPT config class will be removed, iff 41 they are not found in the mirrored `update_cfg`. The following sub-configs are subject to potential removal: 42 - `train_ds` 43 - `validation_ds` 44 - `test_ds` 45 - `optim` + nested `sched`. 46 47 Args: 48 model_cls: A subclass of NemoConfig, that details in entirety all of the parameters that constitute 49 the NeMo Model. 50 51 update_cfg: A DictConfig that mirrors the structure of the NemoConfig data class. Used to update the 52 default values of the config class. 53 54 drop_missing_subconfigs: Bool which determins whether to drop certain sub-configs from the NemoConfig 55 class, if the corresponding sub-config is missing from `update_cfg`. 56 57 Returns: 58 A DictConfig with updated values that can be used to instantiate the NeMo Model along with supporting 59 infrastructure. 
60 """ 61 if not _HAS_HYDRA: 62 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 63 exit(1) 64 if not (is_dataclass(model_cls) or isinstance(model_cls, DictConfig)): 65 raise ValueError("`model_cfg` must be a dataclass or a structured OmegaConf object") 66 67 if not isinstance(update_cfg, DictConfig): 68 update_cfg = OmegaConf.create(update_cfg) 69 70 if is_dataclass(model_cls): 71 model_cls = OmegaConf.structured(model_cls) 72 73 # Update optional configs 74 model_cls = _update_subconfig( 75 model_cls, update_cfg, subconfig_key='train_ds', drop_missing_subconfigs=drop_missing_subconfigs 76 ) 77 model_cls = _update_subconfig( 78 model_cls, update_cfg, subconfig_key='validation_ds', drop_missing_subconfigs=drop_missing_subconfigs 79 ) 80 model_cls = _update_subconfig( 81 model_cls, update_cfg, subconfig_key='test_ds', drop_missing_subconfigs=drop_missing_subconfigs 82 ) 83 model_cls = _update_subconfig( 84 model_cls, update_cfg, subconfig_key='optim', drop_missing_subconfigs=drop_missing_subconfigs 85 ) 86 87 # Add optim and sched additional keys to model cls 88 model_cls = _add_subconfig_keys(model_cls, update_cfg, subconfig_key='optim') 89 90 # Perform full merge of model config class and update config 91 # Remove ModelPT artifact `target` 92 if 'target' in update_cfg.model: 93 # Assume artifact from ModelPT and pop 94 if 'target' not in model_cls.model: 95 with open_dict(update_cfg.model): 96 update_cfg.model.pop('target') 97 98 model_cfg = OmegaConf.merge(model_cls, update_cfg) 99 100 return model_cfg 101 102 103 def _update_subconfig( 104 model_cfg: 'DictConfig', update_cfg: 'DictConfig', subconfig_key: str, drop_missing_subconfigs: bool 105 ): 106 """ 107 Updates the NemoConfig DictConfig such that: 108 1) If the sub-config key exists in the `update_cfg`, but does not exist in ModelPT config: 109 - Add the sub-config from update_cfg to ModelPT config 110 111 2) If the sub-config key does not exist in `update_cfg`, but exists in 
ModelPT config: 112 - Remove the sub-config from the ModelPT config; iff the `drop_missing_subconfigs` flag is set. 113 114 Args: 115 model_cfg: A DictConfig instantiated from the NemoConfig subclass. 116 update_cfg: A DictConfig that mirrors the structure of `model_cfg`, used to update its default values. 117 subconfig_key: A str key used to check and update the sub-config. 118 drop_missing_subconfigs: A bool flag, whether to allow deletion of the NemoConfig sub-config, 119 if its mirror sub-config does not exist in the `update_cfg`. 120 121 Returns: 122 The updated DictConfig for the NemoConfig 123 """ 124 if not _HAS_HYDRA: 125 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 126 exit(1) 127 with open_dict(model_cfg.model): 128 # If update config has the key, but model cfg doesnt have the key 129 # Add the update cfg subconfig to the model cfg 130 if subconfig_key in update_cfg.model and subconfig_key not in model_cfg.model: 131 model_cfg.model[subconfig_key] = update_cfg.model[subconfig_key] 132 133 # If update config does not the key, but model cfg has the key 134 # Remove the model cfg subconfig in order to match layout of update cfg 135 if subconfig_key not in update_cfg.model and subconfig_key in model_cfg.model: 136 if drop_missing_subconfigs: 137 model_cfg.model.pop(subconfig_key) 138 139 return model_cfg 140 141 142 def _add_subconfig_keys(model_cfg: 'DictConfig', update_cfg: 'DictConfig', subconfig_key: str): 143 """ 144 For certain sub-configs, the default values specified by the NemoConfig class is insufficient. 145 In order to support every potential value in the merge between the `update_cfg`, it would require 146 explicit definition of all possible cases. 147 148 An example of such a case is Optimizers, and their equivalent Schedulers. All optimizers share a few basic 149 details - such as name and lr, but almost all require additional parameters - such as weight decay. 
150 It is impractical to create a config for every single optimizer + every single scheduler combination. 151 152 In such a case, we perform a dual merge. The Optim and Sched Dataclass contain the bare minimum essential 153 components. The extra values are provided via update_cfg. 154 155 In order to enable the merge, we first need to update the update sub-config to incorporate the keys, 156 with dummy temporary values (merge update config with model config). This is done on a copy of the 157 update sub-config, as the actual override values might be overriden by the NemoConfig defaults. 158 159 Then we perform a merge of this temporary sub-config with the actual override config in a later step 160 (merge model_cfg with original update_cfg, done outside this function). 161 162 Args: 163 model_cfg: A DictConfig instantiated from the NemoConfig subclass. 164 update_cfg: A DictConfig that mirrors the structure of `model_cfg`, used to update its default values. 165 subconfig_key: A str key used to check and update the sub-config. 166 167 Returns: 168 A ModelPT DictConfig with additional keys added to the sub-config. 
169 """ 170 if not _HAS_HYDRA: 171 logging.error("This function requires Hydra/Omegaconf and it was not installed.") 172 exit(1) 173 with open_dict(model_cfg.model): 174 # Create copy of original model sub config 175 if subconfig_key in update_cfg.model: 176 if subconfig_key not in model_cfg.model: 177 # create the key as a placeholder 178 model_cfg.model[subconfig_key] = None 179 180 subconfig = copy.deepcopy(model_cfg.model[subconfig_key]) 181 update_subconfig = copy.deepcopy(update_cfg.model[subconfig_key]) 182 183 # Add the keys and update temporary values, will be updated during full merge 184 subconfig = OmegaConf.merge(update_subconfig, subconfig) 185 # Update sub config 186 model_cfg.model[subconfig_key] = subconfig 187 188 return model_cfg 189 190 191 def assert_dataclass_signature_match( 192 cls: 'class_type', 193 datacls: 'dataclass', 194 ignore_args: Optional[List[str]] = None, 195 remap_args: Optional[Dict[str, str]] = None, 196 ): 197 """ 198 Analyses the signature of a provided class and its respective data class, 199 asserting that the dataclass signature matches the class __init__ signature. 200 201 Note: 202 This is not a value based check. This function only checks if all argument 203 names exist on both class and dataclass and logs mismatches. 204 205 Args: 206 cls: Any class type - but not an instance of a class. Pass type(x) where x is an instance 207 if class type is not easily available. 208 datacls: A corresponding dataclass for the above class. 209 ignore_args: (Optional) A list of string argument names which are forcibly ignored, 210 even if mismatched in the signature. Useful when a dataclass is a superset of the 211 arguments of a class. 212 remap_args: (Optional) A dictionary, mapping an argument name that exists (in either the 213 class or its dataclass), to another name. Useful when argument names are mismatched between 214 a class and its dataclass due to indirect instantiation via a helper method. 
215 216 Returns: 217 A tuple containing information about the analysis: 218 1) A bool value which is True if the signatures matched exactly / after ignoring values. 219 False otherwise. 220 2) A set of arguments names that exist in the class, but *do not* exist in the dataclass. 221 If exact signature match occurs, this will be None instead. 222 3) A set of argument names that exist in the data class, but *do not* exist in the class itself. 223 If exact signature match occurs, this will be None instead. 224 """ 225 class_sig = inspect.signature(cls.__init__) 226 227 class_params = dict(**class_sig.parameters) 228 class_params.pop('self') 229 230 dataclass_sig = inspect.signature(datacls) 231 232 dataclass_params = dict(**dataclass_sig.parameters) 233 dataclass_params.pop("_target_", None) 234 235 class_params = set(class_params.keys()) 236 dataclass_params = set(dataclass_params.keys()) 237 238 if remap_args is not None: 239 for original_arg, new_arg in remap_args.items(): 240 if original_arg in class_params: 241 class_params.remove(original_arg) 242 class_params.add(new_arg) 243 logging.info(f"Remapped {original_arg} -> {new_arg} in {cls.__name__}") 244 245 if original_arg in dataclass_params: 246 dataclass_params.remove(original_arg) 247 dataclass_params.add(new_arg) 248 logging.info(f"Remapped {original_arg} -> {new_arg} in {datacls.__name__}") 249 250 if ignore_args is not None: 251 ignore_args = set(ignore_args) 252 253 class_params = class_params - ignore_args 254 dataclass_params = dataclass_params - ignore_args 255 logging.info(f"Removing ignored arguments - {ignore_args}") 256 257 intersection = set.intersection(class_params, dataclass_params) 258 subset_cls = class_params - intersection 259 subset_datacls = dataclass_params - intersection 260 261 if (len(class_params) != len(dataclass_params)) or len(subset_cls) > 0 or len(subset_datacls) > 0: 262 logging.error(f"Class {cls.__name__} arguments do not match " f"Dataclass {datacls.__name__}!") 263 264 if 
len(subset_cls) > 0: 265 logging.error(f"Class {cls.__name__} has additional arguments :\n" f"{subset_cls}") 266 267 if len(subset_datacls): 268 logging.error(f"Dataclass {datacls.__name__} has additional arguments :\n{subset_datacls}") 269 270 return False, subset_cls, subset_datacls 271 272 else: 273 return True, None, None 274 [end of nemo/utils/config_utils.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
c607061264713c9f4c35d1fbc5afaaf41471317e
Punctuation data set uses too much memory **Describe the bug** Punctuation datasets cannot be constructed with more than a few million examples (depending on the max length). Even these small datasets can consume a huge amount of memory while preprocessing (100's of GB) and while training (10's of GB per process). This issue was mentioned in passing in https://github.com/NVIDIA/NeMo/issues/2569 though it was not the main issue discussed there and thus not fixed. Assuming it is an acceptable idea to load the entire dataset into memory in the first place, these are the points where there seems to be an excessive use of memory. The function with issues (`get_features`): https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py#L34 The main issues: 1. Why save `all_segment_ids`? It appears there is nowhere this is set to anything but a constant 0. `__getitem__` can deal with it. 2. Why pad to the max length? `collate_fn` can deal with that, and could do so much more efficiently (by only padding to the max length of the batch). 3. Loss mask and input mask can be generated by `__getitem__` or `collate_fn` 4. Subtokens mask could be generated later as well, albeit not trivially. **Expected behavior** Should be able to use a dataset with 10's of millions of lines.
2021-11-10T13:43:43Z
<patch> diff --git a/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py b/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py new file mode 100644 --- /dev/null +++ b/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py @@ -0,0 +1,331 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import multiprocessing as mp +from pathlib import Path + +from nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset import ( + DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME, + DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME, + METADATA_CAPIT_LABEL_VOCAB_KEY, + METADATA_PUNCT_LABEL_VOCAB_KEY, + build_label_ids_from_list_of_labels, + check_labels_for_being_unique_before_building_label_ids, + create_tarred_dataset, +) + + +""" +A tarred dataset allows to train on large amounts without storing it all into memory simultaneously. In case of +punctuation and capitalization model, tarred dataset is a directory which contains metadata file, tar files with +batches, punct_label_vocab.csv and capit_label_vocab.csv files. + +A metadata file is a JSON file with 4 fields: 'num_batches', 'tar_files', 'punct_label_vocab_file', +'capit_label_vocab_file'. 'num_batches' (int) is a total number of batches in tarred dataset. 
'tar_files' is a list of +paths to tar files relative to directory containing the metadata file. 'punct_label_vocab_file' and +'capit_label_vocab_file' are paths to .csv files containing all unique punctuation and capitalization labels. Each +label in these files is written in a separate line. The first labels in both files are equal and serve for padding and +as neutral labels. + +Every tar file contains objects written using `webdataset.TarWriter`. Each object is a dictionary with two items: +'__key__' and 'batch.pyd'. '__key__' is a name of a batch and 'batch.pyd' is a pickled dictionary which contains +'input_ids', 'subtokens_mask', 'punct_labels', 'capit_labels'. 'input_ids' is an array containing ids of source tokens, +'subtokens_mask' is a boolean array showing first tokens in words, 'punct_labels' and 'capit_labels' are arrays with +ids of labels. Metadata file should be passed to constructor of +`nemo.collections.nlp.data.token_classification.PunctuationCapitalizationTarredDataset` and the instance of +the class will handle iteration and constructing masks and token types for BERT model. + +Example of usage: + +python create_punctuation_capitalization_tarred_dataset.py \ + --text <PATH_TO_TEXT_FILE> \ + --labels <PATH_TO_LABELS_FILE> \ + --output_dir <PATH_TO_OUTPUT_DIR> \ + --lines_per_dataset_fragment 10000 \ + --tokens_in_batch 8000 \ + --num_batches_per_tarfile 5 \ + --tokenizer_name char \ + --vocab_file <PATH_TO_CHAR_TOKENIZER_VOCABULARY> +""" + + +def get_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description=f"A tarred dataset allows to train on large amounts without storing it all into memory " + f"simultaneously. In case of punctuation and capitalization model, tarred dataset is a directory which " + f"contains metadata file, tar files with batches, {DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME} and " + f"{DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME} files. 
A metadata file is a JSON file with 4 fields: 'num_batches', " + f"'tar_files', '{METADATA_PUNCT_LABEL_VOCAB_KEY}', '{METADATA_CAPIT_LABEL_VOCAB_KEY}'. 'num_batches' (int) is " + f"a total number of batches in tarred dataset. 'tar_files' is a list of paths to tar files relative " + f"to directory containing the metadata file. '{METADATA_PUNCT_LABEL_VOCAB_KEY}' and " + f"'{METADATA_CAPIT_LABEL_VOCAB_KEY}' are paths to .csv files containing all unique punctuation and " + f"capitalization labels. Each label in these files is written in a separate line. The first labels in both " + f"files are equal and serve for padding and as neutral labels. Every tar file contains objects written " + f"using `webdataset.TarWriter`. Each object is a dictionary with two items: '__key__' and 'batch.pyd'. " + f"'__key__' is a name of a batch and 'batch.pyd' is a pickled dictionary which contains 'input_ids', " + f"'subtokens_mask', 'punct_labels', 'capit_labels'. 'input_ids' is an array containing ids of source tokens, " + f"'subtokens_mask' is a boolean array showing first tokens in words, 'punct_labels' and 'capit_labels' are " + f"arrays with ids of labels. Metadata file should be passed to constructor of " + "`nemo.collections.nlp.data.token_classification.PunctuationCapitalizationTarredDataset` and the instance of " + "the class will handle iteration and constructing masks and token types for BERT model.", + ) + parser.add_argument( + "--text", + "-t", + help="Path to source lowercased text without punctuation. Number of lines in `--text` file has to be equal " + "to number of lines in `--labels` file.", + type=Path, + required=True, + ) + parser.add_argument( + "--labels", + "-L", + type=Path, + required=True, + help="Path to file with labels in the format described here " + "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#" + "nemo-data-format . 
Number of lines in `--labels` file has to be equal to the number of lines in `--text` " + "file.", + ) + parser.add_argument( + "--output_dir", + "-o", + type=Path, + required=True, + help="Path to directory where .tar files, metadata file, label id files are stored.", + ) + parser.add_argument( + "--max_seq_length", + "-s", + type=int, + default=512, + help="Maximum number of subtokens in an input sequence. A source sequence which contain too many subtokens are " + "clipped to `--max_seq_length - 2` subtokens and then [CLS] token is prepended to the clipped sequence and " + "[SEP] token is appended to the clipped sequence. The clipping is performed via removal of subtokens in the " + "end of a source sequence.", + ) + parser.add_argument( + "--tokens_in_batch", + "-b", + type=int, + default=15000, + help="Maximum number of tokens in a batch including [CLS], [SEP], [UNK], and [PAD] tokens. Before packing into " + "batches source sequences are sorted by number of tokens in order to reduce number of pad tokens. So the " + "number of sequences in a batch may be different.", + ) + parser.add_argument( + "--lines_per_dataset_fragment", + type=int, + default=10 ** 6, + help="A number of lines processed by one worker during creation of tarred dataset. A worker tokenizes " + "`--lines_per_dataset_fragment` lines and keeps in RAM tokenized text labels before packing them into " + "batches. Reducing `--lines_per_dataset_fragment` leads to reducing of the amount of memory required by this " + "script.", + ) + parser.add_argument( + "--num_batches_per_tarfile", + type=int, + default=1000, + help="A number of batches saved in a tar file. If you increase `--num_batches_per_tarfile`, then there will " + "be less tar files in the dataset. There cannot be less then `--num_batches_per_tarfile` batches in a tar " + "file, and all excess batches are removed. 
Maximum number of discarded batches is " + "`--num_batches_per_tarfile - 1`.", + ) + parser.add_argument( + "--tokenizer_name", + "-T", + default="bert-base-uncased", + help="Name of the tokenizer used for tokenization of source sequences. Possible options are 'sentencepiece', " + "'word', 'char', HuggingFace tokenizers. For more options see function " + "`nemo.collections.nlp.modules.common.get_tokenizer`. The tokenizer has to have properties `cls_id`, " + "`pad_id`, `sep_id`, `unk_id`.", + ) + parser.add_argument( + "--tokenizer_model", "-m", type=Path, help="Path to tokenizer model required for 'sentencepiece' tokenizer." + ) + parser.add_argument( + "--vocab_file", + "-v", + type=Path, + help="Path to vocabulary file which can be used in 'word', 'char', and HuggingFace tokenizers.", + ) + parser.add_argument( + "--merges_file", "-M", type=Path, help="Path to merges file which can be used in HuggingFace tokenizers." + ) + parser.add_argument( + "--special_token_names", + "-n", + nargs="+", + help="Names of special tokens which may be passed to constructors of 'char', 'word', 'sentencepiece', and " + "HuggingFace tokenizers.", + ) + parser.add_argument( + "--special_token_values", + "-V", + nargs="+", + help="Values of special tokens which may be passed to constructors of 'char', 'word', 'sentencepiece', and " + "HuggingFace tokenizers.", + ) + parser.add_argument( + "--use_fast_tokenizer", "-f", action="store_true", help="Whether to use fast HuggingFace tokenizer." + ) + parser.add_argument( + "--pad_label", + "-P", + default='O', + help="Pad label both for punctuation and capitalization. This label is also is used for marking words which " + "do not need punctuation and capitalization. 
It is also a neutral label used for marking words which do " + "not require punctuation and capitalization.", + ) + punct = parser.add_mutually_exclusive_group(required=False) + punct.add_argument( + "--punct_labels", + "-p", + nargs="+", + help="All punctuation labels EXCEPT PAD LABEL. Punctuation labels are strings separated by spaces. " + "Alternatively you can use parameter `--punct_label_vocab_file`. If none of parameters `--punct_labels` " + "and `--punct_label_vocab_file` are provided, then punctuation label ids will be inferred from `--labels` " + "file.", + ) + punct.add_argument( + "--punct_label_vocab_file", + type=Path, + help="A path to file with punctuation labels. These labels include pad label. Pad label has to be the first " + "label in the file. Each label is written on separate line. Alternatively you can use `--punct_labels` " + "parameter. If none of parameters `--punct_labels` and `--punct_label_vocab_file` are provided, then " + "punctuation label ids will be inferred from `--labels` file.", + ) + capit = parser.add_mutually_exclusive_group(required=False) + capit.add_argument( + "--capit_labels", + "-c", + nargs="+", + help="All capitalization labels EXCEPT PAD LABEL. Capitalization labels are strings separated by spaces. " + "Alternatively you can use parameter `--capit_label_vocab_file`. If none of parameters `--capit_labels` " + "and `--capit_label_vocab_file` are provided, then capitalization label ids will be inferred from `--labels` " + "file.", + ) + capit.add_argument( + "--capit_label_vocab_file", + type=Path, + help="A path to file with capitalization labels. These labels include pad label. Pad label has to be the " + "first label in the file. Each label is written on separate line. Alternatively you can use `--capit_labels` " + "parameter. 
If none of parameters `--capit_labels` and `--capit_label_vocab_file` are provided, then " + "capitalization label ids will be inferred from `--labels` file.", + ) + parser.add_argument( + "--tar_file_prefix", + "-x", + default="punctuation_capitalization", + help="A string from which tar file names start.", + ) + parser.add_argument( + "--n_jobs", + "-j", + type=int, + default=mp.cpu_count(), + help="Number of workers for creating tarred dataset. By default it is equal to the number of CPU cores.", + ) + args = parser.parse_args() + for name in [ + "text", + "labels", + "output_dir", + "tokenizer_model", + "vocab_file", + "merges_file", + "punct_label_vocab_file", + "capit_label_vocab_file", + ]: + if getattr(args, name) is not None: + setattr(args, name, getattr(args, name).expanduser()) + if args.special_token_names is not None or args.special_token_values is not None: + if args.special_token_names is None: + parser.error( + "If you provide parameter `--special_token_values` you have to provide parameter " + "`--special_token_names`." + ) + if args.special_token_values is None: + parser.error( + "If you provide parameter `--special_token_names` you have to provide parameter " + "`--special_token_values`." + ) + if len(args.special_token_names) != len(args.special_token_values): + parser.error( + f"Parameters `--special_token_names` and `--special_token_values` have to have equal number of values " + f"whereas parameter `--special_token_names` has {len(args.special_token_names)} values and parameter " + f"`--special_token_values` has {len(args.special_token_values)} values." + ) + if len(set(args.special_token_names)) != len(args.special_token_names): + for i in range(len(args.special_token_names) - 1): + if args.special_token_names[i] in args.special_token_names[i + 1 :]: + parser.error( + f"Values of parameter `--special_token_names` has to be unique. Found duplicate value " + f"'{args.special_token_names[i]}'." 
+ ) + if args.punct_labels is not None: + check_labels_for_being_unique_before_building_label_ids( + args.pad_label, args.punct_labels, '--pad_label', '--punct_labels', parser.error + ) + check_labels_for_being_unique_before_building_label_ids( + args.pad_label, args.capit_labels, '--pad_label', '--capit_labels', parser.error + ) + return args + + +def main() -> None: + args = get_args() + if args.special_token_names is None: + special_tokens = None + else: + special_tokens = dict(zip(args.special_token_names, args.special_token_values)) + + if args.punct_labels is not None: + punct_label_ids = build_label_ids_from_list_of_labels(args.pad_label, args.punct_labels) + else: + punct_label_ids = None + + if args.capit_labels is not None: + capit_label_ids = build_label_ids_from_list_of_labels(args.pad_label, args.capit_labels) + else: + capit_label_ids = None + + create_tarred_dataset( + args.text, + args.labels, + args.output_dir, + args.max_seq_length, + args.tokens_in_batch, + args.lines_per_dataset_fragment, + args.num_batches_per_tarfile, + args.tokenizer_name, + tokenizer_model=args.tokenizer_model, + vocab_file=args.vocab_file, + merges_file=args.merges_file, + special_tokens=special_tokens, + use_fast_tokenizer=args.use_fast_tokenizer, + pad_label=args.pad_label, + punct_label_ids=punct_label_ids, + capit_label_ids=capit_label_ids, + punct_label_vocab_file=args.punct_label_vocab_file, + capit_label_vocab_file=args.capit_label_vocab_file, + tar_file_prefix=args.tar_file_prefix, + n_jobs=args.n_jobs, + ) + + +if __name__ == "__main__": + main() diff --git a/examples/nlp/token_classification/punctuate_capitalize_infer.py b/examples/nlp/token_classification/punctuate_capitalize_infer.py --- a/examples/nlp/token_classification/punctuate_capitalize_infer.py +++ b/examples/nlp/token_classification/punctuate_capitalize_infer.py @@ -15,6 +15,7 @@ import argparse import json from pathlib import Path +from typing import Dict, List, Union import torch.cuda @@ -40,7 +41,7 
@@ """ -def get_args(): +def get_args() -> argparse.Namespace: default_model_parameter = "pretrained_name" default_model = "punctuation_en_bert" parser = argparse.ArgumentParser( @@ -166,7 +167,7 @@ def get_args(): return args -def load_manifest(manifest: Path): +def load_manifest(manifest: Path) -> List[Dict[str, Union[str, float]]]: result = [] with manifest.open() as f: for i, line in enumerate(f): @@ -175,7 +176,7 @@ def load_manifest(manifest: Path): return result -def main(): +def main() -> None: args = get_args() if args.pretrained_name is None: model = PunctuationCapitalizationModel.restore_from(args.model_path) @@ -188,7 +189,6 @@ def main(): model = model.cpu() else: model = model.to(args.device) - model = model.cpu() if args.input_manifest is None: texts = [] with args.input_text.open() as f: diff --git a/examples/nlp/token_classification/punctuation_capitalization_evaluate.py b/examples/nlp/token_classification/punctuation_capitalization_evaluate.py deleted file mode 100644 --- a/examples/nlp/token_classification/punctuation_capitalization_evaluate.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os - -import pytorch_lightning as pl -from omegaconf import DictConfig - -from nemo.collections.nlp.models import PunctuationCapitalizationModel -from nemo.core.config import hydra_runner -from nemo.utils import logging -from nemo.utils.exp_manager import exp_manager - - -""" -This script shows how to perform evaluation and runs inference of a few examples. - -More details on the task and data format could be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb - -*** Setting the configs *** - -This script uses the `/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml` config file -by default. You may update the config file from the file directly. -The other option is to set another config file via command line arguments by `--config-name=CONFIG_FILE_PATH'. - -For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb - - -*** Model Evaluation *** - - python punctuation_capitalization_evaluate.py \ - model.dataset.data_dir=<PATH_TO_DATA_DIR> \ - pretrained_model=punctuation_en_distilbert - -<PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config) -pretrained_model - pretrained PunctuationCapitalizationModel model from list_available_models() or - path to a .nemo file, for example: punctuation_en_bert or your_model.nemo - -""" - - -@hydra_runner(config_path="conf", config_name="punctuation_capitalization_config") -def main(cfg: DictConfig) -> None: - logging.info( - 'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \ - no DDP to obtain accurate results' - ) - - if not hasattr(cfg.model, 'test_ds'): - raise ValueError(f'model.test_ds was not found in the config, skipping evaluation') - else: - gpu = 1 if cfg.trainer.gpus != 0 else 0 - - trainer = pl.Trainer(gpus=gpu, precision=cfg.trainer.precision, logger=False, checkpoint_callback=False,) - exp_dir = 
exp_manager(trainer, cfg.exp_manager) - - if not cfg.pretrained_model: - raise ValueError( - 'To run evaluation and inference script a pre-trained model or .nemo file must be provided.' - f'Choose from {PunctuationCapitalizationModel.list_available_models()} or "pretrained_model"="your_model.nemo"' - ) - - if os.path.exists(cfg.pretrained_model): - model = PunctuationCapitalizationModel.restore_from(cfg.pretrained_model) - elif cfg.pretrained_model in PunctuationCapitalizationModel.get_available_model_names(): - model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model) - else: - raise ValueError( - f'Provide path to the pre-trained .nemo file or choose from {PunctuationCapitalizationModel.list_available_models()}' - ) - - data_dir = cfg.model.dataset.get('data_dir', None) - - if data_dir is None: - logging.error( - 'No dataset directory provided. Skipping evaluation. ' - 'To run evaluation on a file, specify path to the directory that contains test_ds.text_file and test_ds.labels_file with "model.dataset.data_dir" argument.' - ) - elif not os.path.exists(data_dir): - logging.error(f'{data_dir} is not found, skipping evaluation on the test set.') - else: - model.update_data_dir(data_dir=data_dir) - model._cfg.dataset = cfg.model.dataset - - if not hasattr(cfg.model, 'test_ds'): - logging.error(f'model.test_ds was not found in the config, skipping evaluation') - elif model.prepare_test(trainer): - model.setup_test_data(cfg.model.test_ds) - trainer.test(model) - else: - logging.error('Skipping the evaluation. 
The trainer is not setup properly.') - - # run an inference on a few examples - queries = [ - 'we bought four shirts one pen and a mug from the nvidia gear store in santa clara', - 'what can i do for you today', - 'how are you', - ] - - inference_results = model.add_punctuation_capitalization(queries, batch_size=len(queries), max_seq_length=512) - - for query, result in zip(queries, inference_results): - logging.info(f'Query : {query}') - logging.info(f'Result: {result.strip()}\n') - - logging.info(f'Results are saved at {exp_dir}') - - -if __name__ == '__main__': - main() diff --git a/examples/nlp/token_classification/punctuation_capitalization_train.py b/examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py similarity index 52% rename from examples/nlp/token_classification/punctuation_capitalization_train.py rename to examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py --- a/examples/nlp/token_classification/punctuation_capitalization_train.py +++ b/examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -15,9 +15,13 @@ import os import pytorch_lightning as pl +import torch from omegaconf import DictConfig, OmegaConf from nemo.collections.nlp.models import PunctuationCapitalizationModel +from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import ( + PunctuationCapitalizationConfig, +) from nemo.core.config import hydra_runner from nemo.utils import logging from nemo.utils.exp_manager import exp_manager @@ -44,25 +48,50 @@ *** Model training *** To run this script and train the model from scratch, use: - python punctuation_and_capitalization_train.py \ - model.dataset.data_dir=<PATH_TO_DATA_DIR> + python punctuation_capitalization_train_evaluate.py \ + model.train_ds.ds_item=<PATH_TO_TRAIN_DATA> \ + model.validation_ds.ds_item=<PATH_TO_DEV_DATA> To use one of the pretrained versions of the model and finetune it, run: - python punctuation_and_capitalization.py \ - pretrained_model=punctuation_en_bert \ - model.dataset.data_dir=<PATH_TO_DATA_DIR> + python punctuation_capitalization_train_evaluate.py \ + pretrained_model=punctuation_en_bert \ + model.train_ds.ds_item=<PATH_TO_TRAIN_DATA> \ + model.validation_ds.ds_item=<PATH_TO_DEV_DATA> - <PATH_TO_DATA_DIR> - a directory that contains test_ds.text_file and test_ds.labels_file (see the config) pretrained_model - pretrained PunctuationCapitalization model from list_available_models() or - path to a .nemo file, for example: punctuation_en_bert or model.nemo + path to a .nemo file, for example: punctuation_en_bert or model.nemo + +If you wish to perform testing after training set `do_testing` to `true: + python punctuation_capitalization_train_evaluate.py \ + +do_testing=true \ + pretrained_model=punctuation_en_bert \ + model.train_ds.ds_item=<PATH_TO_TRAIN_DATA> \ + model.validation_ds.ds_item=<PATH_TO_DEV_DATA> + +Set `do_training` to `false` and `do_testing` to `true` to perform evaluation without training: + python punctuation_capitalization_train_evaluate.py \ + +do_testing=true 
\ + +do_training=false \ + pretrained_model=punctuation_en_bert \ + model.validation_ds.ds_item=<PATH_TO_DEV_DATA> """ @hydra_runner(config_path="conf", config_name="punctuation_capitalization_config") def main(cfg: DictConfig) -> None: + torch.manual_seed(42) + cfg = OmegaConf.merge(OmegaConf.structured(PunctuationCapitalizationConfig()), cfg) trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) + if not cfg.do_training and not cfg.do_testing: + raise ValueError("At least one of config parameters `do_training` and `do_testing` has to `true`.") + if cfg.do_training: + if cfg.model.get('train_ds') is None: + raise ValueError('`model.train_ds` config section is required if `do_training` config item is `True`.') + if cfg.do_testing: + if cfg.model.get('test_ds') is None: + raise ValueError('`model.test_ds` config section is required if `do_testing` config item is `True`.') if not cfg.pretrained_model: logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') @@ -74,27 +103,28 @@ def main(cfg: DictConfig) -> None: model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model) else: raise ValueError( - f'Provide path to the pre-trained .nemo file or choose from {PunctuationCapitalizationModel.list_available_models()}' + f'Provide path to the pre-trained .nemo file or choose from ' + f'{PunctuationCapitalizationModel.list_available_models()}' ) - - data_dir = cfg.model.dataset.get('data_dir', None) - if data_dir: - if not os.path.exists(data_dir): - raise ValueError(f'{data_dir} is not found at') - - # we can also do finetuning of the pretrained model but we would need to update the data dir - model.update_data_dir(data_dir) - # setup train and validation Pytorch DataLoaders + model.update_config_after_restoring_from_checkpoint( + class_labels=cfg.model.class_labels, + common_dataset_parameters=cfg.model.common_dataset_parameters, + train_ds=cfg.model.get('train_ds') if cfg.do_training else None, + 
validation_ds=cfg.model.get('validation_ds') if cfg.do_training else None, + test_ds=cfg.model.get('test_ds') if cfg.do_testing else None, + optim=cfg.model.get('optim') if cfg.do_training else None, + ) + model.set_trainer(trainer) + if cfg.do_training: model.setup_training_data() model.setup_validation_data() - logging.info(f'Using config file of the pretrained model') + model.setup_optimization() else: - raise ValueError( - 'Specify a valid dataset directory that contains test_ds.text_file and test_ds.labels_file \ - with "model.dataset.data_dir" argument' - ) - - trainer.fit(model) + model.setup_test_data() + if cfg.do_training: + trainer.fit(model) + if cfg.do_testing: + trainer.test(model) if __name__ == '__main__': diff --git a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py --- a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py +++ b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py @@ -12,209 +12,850 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__all__ = ['BertPunctuationCapitalizationDataset', 'BertPunctuationCapitalizationInferDataset'] +__all__ = [ + 'BertPunctuationCapitalizationDataset', + 'LABEL_ID_DIR_FOR_NEMO_CHECKPOINT', + 'Progress', + 'PunctuationCapitalizationEvalDataConfig', + 'PunctuationCapitalizationTrainDataConfig', + 'create_label_ids', + 'create_masks_and_segment_ids', + 'is_legacy_data_config', + 'legacy_data_config_to_new_data_config', + 'load_label_ids', + 'raise_not_equal_labels_error', + 'save_label_ids', +] import itertools +import multiprocessing as mp import os import pickle -from typing import Dict, List, Optional, Tuple +import random +from dataclasses import dataclass +from math import ceil +from pathlib import Path +from queue import Empty +from time import sleep +from typing import Any, Dict, List, Optional, Set, Tuple, Union import numpy as np import torch -from torch.nn.utils.rnn import pad_sequence +from numpy.typing import ArrayLike +from omegaconf import MISSING, DictConfig, OmegaConf +from tqdm import tqdm from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats from nemo.core.classes import Dataset -from nemo.core.neural_types import ChannelType, Index, LabelsType, MaskType, NeuralType -from nemo.core.neural_types.elements import BoolType +from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType from nemo.utils import logging +MAX_NUM_QUERIES_IN_SPLIT = 10 ** 4 +TOKENIZATION_PROGRESS_REPORT_PERIOD = 10 ** 3 +BATCH_MARK_UP_PROGRESS_REPORT_PERIOD = 10 ** 4 +BATCH_BUILDING_PROGRESS_REPORT_PERIOD = 10 ** 4 + +LABEL_ID_DIR_FOR_NEMO_CHECKPOINT = "label_id_files_for_nemo_checkpoint" + + +@dataclass +class PunctuationCapitalizationDataConfigBase: + """A base class for punctuation and capitalization data configs. 
This class does not define ``ds_item`` + attribute which works differently for train and evaluation data.""" + + ################################################# + # COMMON DATASET PARAMETERS + ################################################# + use_tarred_dataset: bool = MISSING + """Whether to use tarred dataset. If True, then you should provide ``tar_metadata_file``. Otherwise, you should + provide ``text_file``, ``labels_file``, ``tokens_in_batch``.""" + + label_info_save_dir: Optional[str] = None + """A path to a directory where files created during dataset processing are stored. These files include label id + files and label stats files. By default, it is a directory containing ``text_file`` or ``tar_metadata_file``. + You may need this parameter if dataset directory is read-only and thus does not allow saving anything near dataset + files""" + + ################################################# + # REGULAR DATASET PARAMETERS + ################################################# + text_file: Optional[str] = None + """A path to a file with source text data without punctuation and capitalization.""" + + labels_file: Optional[str] = None + """A path to a file with punctuation and capitalization labels in NeMo format. NeMo format is described in + `documentation + <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format>`_ + """ + + tokens_in_batch: Optional[int] = None + """Number of tokens in a batch including paddings and special tokens ([CLS], [SEP], [UNK]). This config does + not have ``batch_size`` parameter.""" + + max_seq_length: int = 512 + """Max number of tokens in a source sequence. ``max_seq_length`` includes [CLS] and [SEP] tokens. Sequences + which are too long will be clipped by removal of tokens from the end of a sequence.""" + + num_samples: int = -1 + """A number of samples loaded from ``text_file`` and ``labels_file`` which are used in the dataset. 
If this + parameter equals ``-1``, then all samples are used.""" + + use_cache: bool = True + """Whether to use pickled features. If pickled features does not exist, then pickled features will be created. + For large regular datasets, pickled features may considerably reduce time for training starting. Tokenization + of source sequences is not fast because sequences are split into words before tokenization. For even larger + datasets (~4M), tarred datasets are recommended.""" + + cache_dir: Optional[str] = None + """A path to a directory containing cache or directory where newly created cache is saved. By default, it is + a directory containing ``text_file``. You may need this parameter if cache for a dataset is going to be created + and the dataset directory is read-only. + + ``cache_dir`` and ``label_info_save_dir`` are separate parameters for the case when a cache is ready and this cache + is stored in a read only directory. In this case you will separate ``label_info_save_dir``.""" + + get_label_frequences: bool = False + """Whether to show and save label frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If + ``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir``""" + + verbose: bool = True + """If ``True`` dataset instance will print progress messages and examples of acquired features.""" + + n_jobs: Optional[int] = 0 + """Number of workers used for features creation (tokenization, label encoding, and clipping). If 0, then + multiprocessing is not used; if ``None``, then n_jobs is equal to the number of CPU cores. + There can be weird deadlocking errors with some tokenizers (e.g. SentencePiece) if ``n_jobs`` is greater than zero. + """ + + ################################################# + # TARRED DATASET PARAMETERS + ################################################# + tar_metadata_file: Optional[str] = None + """A path to tarred dataset metadata file. 
Tarred metadata file and other parts of tarred dataset are usually + created by the script + `examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_ + """ + + tar_shuffle_n: int = 1 + """The size of shuffle buffer of `webdataset`. The number of batches which are permuted.""" + + ################################################# + # PYTORCH DATALOADER PARAMETERS + ################################################# + shuffle: bool = True + """Shuffle batches every epoch. For regular training datasets, the parameter also activates batch repacking every + epoch. For tarred dataset, it would be only batches permutation.""" + + drop_last: bool = False + """In cases when data parallelism is used, ``drop_last`` defines the way data pipeline behaves when some replicas + are out of data and some are not. If ``drop_last`` is ``True``, then epoch ends in the moment when any replica runs + out of data. If ``drop_last`` is ``False``, then the replica will replace missing batch with a batch from a pool of + batches that the replica has already processed. If data parallelism is not used, then parameter ``drop_last`` does + not do anything. 
For more information see ``torch.utils.data.distributed.DistributedSampler``""" + + pin_memory: bool = True + """See ``torch.utils.data.DataLoader`` documentation.""" + + num_workers: int = 8 + """See ``torch.utils.data.DataLoader`` documentation.""" + + persistent_workers: bool = True + """See ``torch.utils.data.DataLoader`` documentation.""" + + +@dataclass +class PunctuationCapitalizationTrainDataConfig(PunctuationCapitalizationDataConfigBase): + ds_item: Optional[str] = MISSING + """Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay.""" + -def get_features( - queries: List[str], +@dataclass +class PunctuationCapitalizationEvalDataConfig(PunctuationCapitalizationDataConfigBase): + ds_item: Optional[Any] = MISSING + """Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay. ``Any`` = ``str`` or + ``List[str]``. If a ``List[str]``, then the model is tested or validated on several datasets.""" + + +def is_legacy_data_config(ds_section: DictConfig) -> bool: + return 'use_tarred_dataset' not in ds_section + + +def legacy_data_config_to_new_data_config( + ds_section: DictConfig, legacy_dataset_section: DictConfig, train: bool +) -> DictConfig: + """ + Transform old style dataset to new format dataset. + Args: + ds_section: a ds section (``train_ds``, or ``validation_ds``, or ``test_ds``) from old style config. Such + section contain ``batch_size`` parameter. + legacy_dataset_section: a ``model.dataset`` section. 
``model.dataset`` section contains ``data_dir`` parameter + train: ``True`` if ``train_ds`` is transformed and ``False`` otherwise + + Returns: + New format dataset based on either ``PunctuationCapitalizationTrainDataConfig`` (``train=True``) or + ``PunctuationCapitalizationEvalDataConfig`` (``train=False``) + """ + if train: + cls = PunctuationCapitalizationTrainDataConfig + ds_item = legacy_dataset_section.get('data_dir') + else: + cls = PunctuationCapitalizationEvalDataConfig + ds_item = ds_section.get('ds_item') + ds_item = legacy_dataset_section.get('data_dir') if ds_item is None else ds_item + if ds_item is None: + raise ValueError( + f"Data directory was not found in legacy config.\nspecific dataset configuration:\n" + f"{OmegaConf.to_yaml(ds_section)}\nmodel.dataset:\n{OmegaConf.to_yaml(legacy_dataset_section)}" + ) + new_config = OmegaConf.structured( + cls( + use_tarred_dataset=False, + text_file=ds_section.text_file, + labels_file=ds_section.labels_file, + ds_item=ds_item, + max_seq_length=legacy_dataset_section.get( + 'max_seq_length', PunctuationCapitalizationDataConfigBase.max_seq_length + ), + ) + ) + return new_config + + +def _check_number_of_labels( + words: List[str], + query: str, + qi: int, + split_i: int, + punctuation_labels: List[str], + capitalization_labels: List[str], +) -> None: + if len(words) != len(punctuation_labels): + raise ValueError( + f"Number of punctuation labels for a query number {qi} in a split number {split_i} is not equal to " + f"number of words. Number of words: {len(words)}, number of punctuation labels: " + f"{len(punctuation_labels)}. First 100 characters of the query: '{query[:100]}', punctuation labels: " + f"'{punctuation_labels}'" + ) + if len(words) != len(capitalization_labels): + raise ValueError( + f"Number of capitalization labels for a query number {qi} in a split number {split_i} is not equal to " + f"number of words. 
Number of words: {len(words)}, number of capitalization labels: " + f"{len(capitalization_labels)}. First 100 characters of the query: '{query[:100]}', " + f"capitalization labels: '{capitalization_labels}'" + ) + + +def _show_prog(queues: Tuple[mp.Queue, ...], totals: List[int], descriptions: List[str], units: List[str]) -> None: + """ + Show several ``tqdm`` progress bars. + Args: + queues: a list of queues by which progress is delivered into this function. Each queue is responsible for one + progress bar. ``show_prog`` function extracts integers from ``queues`` elements and adds them to progress + bars. If value extracted from a queue equals ``-1``, then corresponding progress bar is closed. When all + progress bars are closed, this function returns. + totals: list of values 100% of progress bars. See more in a description of ``total`` parameter of + ``tqdm.tqdm`` function + descriptions: list of descriptions of progress bars. See more in a description of ``desc`` parameter of + ``tqdm.tqdm`` function + units: list of progress bar units. See more in a description of ``unit`` parameter of ``tqdm.tqdm`` function + """ + if not all([len(queues) == len(v) for v in [totals, descriptions, units]]): + raise ValueError( + f"All of parameters `queues`, `total_num_lines`, `descriptions`, `units` have to have equal lengths. " + f"len(queues)={len(queues)}, len(total_num_lines)={len(totals)}, " + f"len(descriptions)={len(descriptions)}, len(units)={len(units)}." 
+ ) + prog = [ + tqdm(total=tt, desc=dd, unit=uu, unit_scale=True, position=i) + for i, (tt, dd, uu) in enumerate(zip(totals, descriptions, units)) + ] + finished = [False] * len(queues) + while True: + for i, queue in enumerate(queues): + stop = False + to_add = 0 + try: + v = queue.get(block=False) + while v != -1: + to_add += v + v = queue.get(block=False) + stop = True + except Empty: + if to_add == 0 and not stop: + continue + prog[i].n += to_add + prog[i].update(0) + if prog[i].n >= totals[i]: + finished[i] = True + prog[i].close() + if stop: + if prog[i].n < totals[i]: + logging.warning( + f"Progress with description '{descriptions[i]}' terminated before progress bar " + f"reached 100%. prog.n={prog[i].n}, total_num_lines={totals[i]}" + ) + finished[i] = True + prog[i].close() + if all(finished): + break + sleep(0.1) + + +class Progress: + """ + Manages several ``tqdm`` progress bars for multi process tasks. This class can be used as context manager. + + The class starts separate process which creates and updates progress bars. Information to progress process is + passed via multiprocessing queues. There is a separate queue for every progress bar. + + You can use it as context manager: + + .. code-block:: python + with Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) as progress_queues: + num_processes = 10 + with multiprocessing.Pool(num_processes) as pool: + data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes)) + pool.starmap(worker_func, data) + + Or without context manager: + + .. 
code-block:: python + progress = Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) + progress_queues = progress.get_queue() + num_processes = 10 + with multiprocessing.Pool(num_processes) as pool: + data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes)) + pool.starmap(worker_func, data) + progress.finish() + + In a worker function you will have to put number of processed items into the progress queues. For example: + + .. code-block:: python + def worker_func(my_datum, parrot_progress_queue, frog_progress_queue): + ... + for i in range(10): + parrot_progress_queue.put(1) + frog_progress_queue.put(2) + + Progress bars and progress process are closed when ``finish`` or ``__exit__`` methods are called. + """ + + def __init__(self, total: Union[int, List[int]], desc: Union[str, List[str]], unit: Union[str, List[str]]) -> None: + """ + Starts progress process and creates queues for passing information to the progress process. Number of progress + bars is equal to the max length of lists ``total``, ``desc``, ``unit``. If none of these parameters is a list, + then 1 progress bar is created. + + Args: + total: a list of ``int`` which length is equal to the number of progress bars OR an ``int`` OR a list of + one ``int``. Number which comprises 100% of progress bar. When sum of values passed through the + corresponding queue equals ``total`` corresponding progress bar reaches 100%. If ``total`` is an + ``int`` or a list of one element, then all progress bars have equal ``total`` parameter. + desc: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one + ``str``. Description of a progress bar which is showed as a prefix. See more in description of + parameter ``desc`` of function ``tqdm.tqdm``. + unit: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one + ``str``. A unit of a progress bar. 
See more in description of parameter ``unit`` of function + ``tqdm.tqdm``. + """ + if not isinstance(total, list): + total = [total] + if not isinstance(desc, list): + desc = [desc] + if not isinstance(unit, list): + unit = [unit] + num_processes = max([len(total), len(desc), len(unit)]) + for param in [total, desc, unit]: + if len(param) not in [num_processes, 1]: + raise ValueError( + f"If parameter of `Progress.__init__` method is a list, then it has to be the same length as other " + f"parameters which are lists" + ) + if len(param) == 1: + param *= num_processes + manager = mp.Manager() + self.progress_queues = tuple(manager.Queue() for _ in range(num_processes)) + self.progress_process = mp.Process(target=_show_prog, args=(self.progress_queues, total, desc, unit)) + self.progress_process.start() + + def __enter__(self) -> Tuple[mp.Queue, ...]: + return self.get_queues() + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + self.finish() + + def get_queues(self) -> Tuple[mp.Queue, ...]: + return self.progress_queues + + def finish(self) -> None: + for q in self.progress_queues: + q.put(-1) + self.progress_process.join() + + +class TokenizeCreateMasksClipWorker: + """A worker for tokenization, encoding labels, creating masks for first token in a word, sequence clipping""" + + def __init__( + self, + max_seq_length: int, + tokenizer: TokenizerSpec, + punct_label_ids: Optional[Dict[str, int]], + capit_label_ids: Optional[Dict[str, int]], + pad_label: str, + verbose: bool, + progress_queue: mp.Queue, + ) -> None: + """ + Args: + max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of + tokens in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence + are removed + tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id`` + punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0. 
+ capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0. + pad_label: pad value use for labels. By default, it's the neutral label for punctuation and capitalization. + Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0`` + verbose: whether to report when the worker finishes its job + progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset + """ + self.max_seq_length = max_seq_length + self.tokenizer = tokenizer + self.punct_label_ids = punct_label_ids + self.capit_label_ids = capit_label_ids + self.pad_label = pad_label + self.verbose = verbose + self.progress_queue = progress_queue + + def _maybe_clip(self, values: List[int], append_value: int) -> List[int]: + if len(values) > self.max_seq_length: + return values[: self.max_seq_length - 1] + [append_value] + return values + + def __call__( + self, + queries: List[str], + punct_label_lines: Optional[Union[List[str], Tuple[str, ...]]], + capit_label_lines: Optional[Union[List[str], Tuple[str, ...]]], + split_i: int, + ) -> Tuple[List[ArrayLike], List[ArrayLike], List[ArrayLike], List[ArrayLike]]: + """ + Tokenize, clip, encode labels, and create masks of first tokens in words. + + Args: + queries: text sequences + punct_label_lines: a list or a tuple of labels for every word in a sequence (str) + capit_label_lines: a list of a tuple labels for every word in a sequence (str) + split_i: number of a split which is processed. Used for logging + + Returns: + input_ids: a list of 1D int32 arrays. Each array contains token ids of the corresponding query + subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the + first token in a word + punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in + one word have identical labels + capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. 
Tokens + in one word have identical labels + """ + all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels = [], [], [], [] + progress_made = 0 + for i, query in enumerate(queries): + words = query.split() + input_ids, subtokens_mask = [self.tokenizer.cls_id], [0] + _check_number_of_labels(words, query, i, split_i, punct_label_lines[i], capit_label_lines[i]) + pad_id = self.punct_label_ids[self.pad_label] + punct_labels = [pad_id] + punct_query_labels = [self.punct_label_ids[lab] for lab in punct_label_lines[i]] + capit_labels = [pad_id] + capit_query_labels = [self.capit_label_ids[lab] for lab in capit_label_lines[i]] + for j, word in enumerate(words): + word_ids = self.tokenizer.text_to_ids(word) + if not word_ids and len(word): + word_ids = [self.tokenizer.unk_id] + input_ids.extend(word_ids) + + subtokens_mask.append(1) + subtokens_mask.extend([0] * (len(word_ids) - 1)) + + punct_labels.extend([punct_query_labels[j]] * len(word_ids)) + capit_labels.extend([capit_query_labels[j]] * len(word_ids)) + + # add eos token + input_ids.append(self.tokenizer.sep_id) + subtokens_mask.append(0) + + all_input_ids.append(np.array(self._maybe_clip(input_ids, self.tokenizer.sep_id), dtype=np.int32)) + all_subtokens_mask.append(np.array(self._maybe_clip(subtokens_mask, 0), dtype=bool)) + + punct_labels.append(pad_id) + punct_all_labels.append(np.array(self._maybe_clip(punct_labels, pad_id), dtype=np.int32)) + capit_labels.append(pad_id) + capit_all_labels.append(np.array(self._maybe_clip(capit_labels, pad_id), dtype=np.int32)) + progress_made += 1 + if progress_made >= TOKENIZATION_PROGRESS_REPORT_PERIOD: + self.progress_queue.put(progress_made) + progress_made = 0 + self.progress_queue.put(progress_made) + if self.verbose: + logging.info(f"Finished processing data split number {split_i}") + return all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels + + +def _get_features( + queries: Union[List[str], Tuple[str, ...]], + punct_label_lines: 
Union[List[str], Tuple[str, ...]], + capit_label_lines: Union[List[str], Tuple[str, ...]], max_seq_length: int, tokenizer: TokenizerSpec, - punct_label_ids: dict = None, - capit_label_ids: dict = None, + punct_label_ids: Dict[str, int] = None, + capit_label_ids: Dict[str, int] = None, pad_label: str = 'O', - punct_labels_lines=None, - capit_labels_lines=None, - ignore_extra_tokens=False, - ignore_start_end: Optional[bool] = False, -): + verbose: bool = True, + n_jobs: Optional[int] = 0, + progress_queue: Optional[mp.Queue] = None, +) -> Tuple[List[ArrayLike], List[ArrayLike], List[ArrayLike], List[ArrayLike]]: """ - Processes the data and returns features. + Tokenizes data, encodes labels, creates masks of first tokens in words, clips sequences by number of tokens. Args: queries: text sequences - max_seq_length: max sequence length minus 2 for [CLS] and [SEP] - tokenizer: such as AutoTokenizer - pad_label: pad value use for labels. By default, it's the neutral label. - punct_label_ids: dict to map punctuation labels to label ids. - Starts with pad_label->0 and then increases in alphabetical order. - Required for training and evaluation, not needed for inference. - capit_label_ids: dict to map labels to label ids. Starts - with pad_label->0 and then increases in alphabetical order. - Required for training and evaluation, not needed for inference. - punct_labels: list of labels for every word in a sequence (str) - capit_labels: list of labels for every word in a sequence (str) - ignore_extra_tokens: whether to ignore extra tokens in the loss_mask - ignore_start_end: whether to ignore bos and eos tokens in the loss_mask + max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. 
If number of tokens + in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence are removed + tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id`` + punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0. + capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0. + pad_label: pad value use for labels. By default, it's the neutral label for punctuation and capitalization. + Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0`` + punct_label_lines: a list of a tuple of labels for every word in a sequence (str) + capit_label_lines: a list or a tuple of labels for every word in a sequence (str) + verbose: whether to show examples of tokenized data and various progress information + n_jobs: a number of workers used for preparing features. If ``n_jobs <= 0``, then do not use multiprocessing + and run features creation in this process. If not set, number of workers will be equal to the number of + CPUs. + + !!WARNING!! + There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT) + if ``n_jobs > 0``. + + progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset Returns: - all_input_ids: input ids for all tokens - all_segment_ids: token type ids - all_input_mask: attention mask to use for BERT model - all_subtokens_mask: masks out all subwords besides the first one - all_loss_mask: loss mask to mask out tokens during training - punct_all_labels: all labels for punctuation task (ints) - capit_all_labels: all labels for capitalization task (ints) - punct_label_ids: label (str) to id (int) map for punctuation task - capit_label_ids: label (str) to id (int) map for capitalization task + input_ids: a list of 1D int32 arrays. Each array contains token ids of corresponding query + subtokens_mask: a list of 1D boolean arrays. 
An array element is ``True`` if corresponding token is the + first token in a word + punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in one + word have identical labels. + capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens in + one word have identical labels """ - all_subtokens = [] - all_loss_mask = [] - all_subtokens_mask = [] - all_segment_ids = [] - all_input_ids = [] - all_input_mask = [] - sent_lengths = [] - punct_all_labels = [] - capit_all_labels = [] - with_label = False - - if punct_labels_lines and capit_labels_lines: - with_label = True - - for i, query in enumerate(queries): - words = query.strip().split() - - # add bos token - subtokens = [tokenizer.cls_token] - loss_mask = [1 - ignore_start_end] - subtokens_mask = [0] - if with_label: - pad_id = punct_label_ids[pad_label] - punct_labels = [pad_id] - punct_query_labels = [punct_label_ids[lab] for lab in punct_labels_lines[i]] + if verbose: + logging.info("Start initial tokenization.") + create_progress_process = progress_queue is None + if n_jobs is None: + n_jobs = min(mp.cpu_count(), len(queries)) + if verbose: + logging.info(f"Running tokenization with {n_jobs} jobs.") + + # Number of queries in split + split_size = min(len(queries) // max(n_jobs, 1), MAX_NUM_QUERIES_IN_SPLIT) + n_split = len(queries) // split_size + split_queries = [queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [ + queries[split_size * (n_split - 1) :] + ] + split_punct_labels_lines = [ + punct_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1) + ] + [punct_label_lines[split_size * (n_split - 1) :]] + split_capit_labels_lines = [ + capit_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1) + ] + [capit_label_lines[split_size * (n_split - 1) :]] + args = list(zip(split_queries, split_punct_labels_lines, split_capit_labels_lines, 
range(n_split))) + if create_progress_process: + progress = Progress(len(queries), "Tokenization", "query") + progress_queue = progress.get_queues()[0] + if n_jobs > 0: + with mp.Pool(n_jobs) as pool: + result = pool.starmap( + TokenizeCreateMasksClipWorker( + max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue + ), + args, + ) + else: + result = [] + for x in args: + result.append( + TokenizeCreateMasksClipWorker( + max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue, + )(*x) + ) + if create_progress_process: + progress.finish() + input_ids, subtokens_mask, punct_labels, capit_labels = tuple(list(itertools.chain(*e)) for e in zip(*result)) + if verbose: + logging.info("Finished initial tokenization.") + get_stats([len(inp) for inp in input_ids]) + logging.info(f"Finished clipping and padding.") + for i in range(min(len(input_ids), 5)): + logging.info("*** Example ***") + logging.info("i: %s" % (i)) + logging.info("subtokens: %s" % " ".join(list(map(str, input_ids[i])))) + logging.info("subtokens_mask: %s" % " ".join(list(map(str, subtokens_mask[i])))) + logging.info("punct_labels: %s" % " ".join(list(map(str, punct_labels[i])))) + logging.info("capit_labels: %s" % " ".join(list(map(str, capit_labels[i])))) + return input_ids, subtokens_mask, punct_labels, capit_labels + + +def create_masks_and_segment_ids( + input_ids: ArrayLike, + subtokens_mask: ArrayLike, + pad_id: int, + cls_id: int, + sep_id: int, + ignore_start_end: bool, + ignore_extra_tokens: bool, +) -> Tuple[ArrayLike, ArrayLike, ArrayLike]: + """ + Creates segment ids array, input mask, loss mask. - capit_labels = [pad_id] - capit_query_labels = [capit_label_ids[lab] for lab in capit_labels_lines[i]] + Segment ids array is BERT token type ids in HuggingFace terminology. It is a zeros array for punctuation + and capitalization task. 
- for j, word in enumerate(words): - word_tokens = tokenizer.text_to_tokens(word) - subtokens.extend(word_tokens) + Input mask element is ``True`` if an element of ``input_ids`` is not padding and ``False`` otherwise. - loss_mask.append(1) - loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1)) + Loss mask element is ``True`` for the first token in a word. If ``ignore_start_end=False``, then loss mask + element is ``True`` for [CLS] and [SEP] tokens. If ``ignore_extra_tokens=False``, then loss mask element is ``True`` + for all word tokens. In all other cases loss mask elements are ``False``. - subtokens_mask.append(1) - subtokens_mask.extend([0] * (len(word_tokens) - 1)) + Args: + input_ids: an integer array of shape ``[Batch, Time]`` containing ids of source token ids + subtokens_mask: a boolean array of shape ``[Batch, Time]`` which elements are ``True`` if they correspond to + the first token of some word + pad_id: an id of padding token + cls_id: an id of [CLS] token + sep_id: an id of [SEP] token + ignore_start_end: whether to compute loss for [CLS] and [SEP] tokens + ignore_extra_tokens: whether to compute loss for not first tokens in words - if with_label: - punct_labels.extend([punct_query_labels[j]] * len(word_tokens)) - capit_labels.extend([capit_query_labels[j]] * len(word_tokens)) + Returns: + segment_ids: int8 array of shape [Batch, Time] + input_mask: boolean array of shape [Batch, Time] + loss_mask: boolean array of shape [Batch, Time] + """ + segment_ids = np.zeros_like(input_ids, dtype=np.int8) + input_mask = np.not_equal(input_ids, pad_id) + special_mask = np.equal(input_ids, cls_id) & np.equal(input_ids, sep_id) + if ignore_start_end: + if ignore_extra_tokens: + loss_mask = subtokens_mask + else: + loss_mask = input_mask & ~special_mask + else: + if ignore_extra_tokens: + loss_mask = subtokens_mask | special_mask + else: + loss_mask = input_mask + return segment_ids, input_mask, loss_mask - # add eos token - 
subtokens.append(tokenizer.sep_token) - loss_mask.append(1 - ignore_start_end) - subtokens_mask.append(0) - sent_lengths.append(len(subtokens)) - all_subtokens.append(subtokens) - all_loss_mask.append(loss_mask) - all_subtokens_mask.append(subtokens_mask) - all_input_mask.append([1] * len(subtokens)) - if with_label: - punct_labels.append(pad_id) - punct_all_labels.append(punct_labels) - capit_labels.append(pad_id) - capit_all_labels.append(capit_labels) - - max_seq_length = min(max_seq_length, max(sent_lengths)) - logging.info(f'Max length: {max_seq_length}') - get_stats(sent_lengths) - too_long_count = 0 - - for i, subtokens in enumerate(all_subtokens): - if len(subtokens) > max_seq_length: - subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :] - all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :] - all_loss_mask[i] = [int(not ignore_start_end)] + all_loss_mask[i][-max_seq_length + 1 :] - all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :] - - if with_label: - punct_all_labels[i] = [pad_id] + punct_all_labels[i][-max_seq_length + 1 :] - capit_all_labels[i] = [pad_id] + capit_all_labels[i][-max_seq_length + 1 :] - too_long_count += 1 - - all_input_ids.append(tokenizer.tokens_to_ids(subtokens)) - - if len(subtokens) < max_seq_length: - extra = max_seq_length - len(subtokens) - all_input_ids[i] = all_input_ids[i] + [0] * extra - all_loss_mask[i] = all_loss_mask[i] + [0] * extra - all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra - all_input_mask[i] = all_input_mask[i] + [0] * extra - - if with_label: - punct_all_labels[i] = punct_all_labels[i] + [pad_id] * extra - capit_all_labels[i] = capit_all_labels[i] + [pad_id] * extra - - all_segment_ids.append([0] * max_seq_length) - - logging.info(f'{too_long_count} are longer than {max_seq_length}') - - for i in range(min(len(all_input_ids), 5)): - logging.info("*** Example ***") - logging.info("i: %s" % (i)) - logging.info("subtokens: %s" % " 
".join(list(map(str, all_subtokens[i])))) - logging.info("loss_mask: %s" % " ".join(list(map(str, all_loss_mask[i])))) - logging.info("input_mask: %s" % " ".join(list(map(str, all_input_mask[i])))) - logging.info("subtokens_mask: %s" % " ".join(list(map(str, all_subtokens_mask[i])))) - if with_label: - logging.info("punct_labels: %s" % " ".join(list(map(str, punct_all_labels[i])))) - logging.info("capit_labels: %s" % " ".join(list(map(str, capit_all_labels[i])))) - - return ( - all_input_ids, - all_segment_ids, - all_input_mask, - all_subtokens_mask, - all_loss_mask, - punct_all_labels, - capit_all_labels, - punct_label_ids, - capit_label_ids, - ) +def create_label_ids(unique_labels: Set[str], pad_label: str) -> Dict[str, int]: + """ + Returns label ids dictionary. ``pad_label`` always has id ``0``. Other labels are sorted in alphabetical order. + Args: + unique_labels: a set of labels from which label ids dictionary is created. May or may no contain ``pad_label`` + pad_label: label used for padding. It is also a neutral label + + Returns: + label ids dictionary + """ + label_ids = {pad_label: 0} + if pad_label in unique_labels: + unique_labels.remove(pad_label) + for label in sorted(unique_labels): + label_ids[label] = len(label_ids) + return label_ids + + +def load_label_ids(file_path: Union[str, os.PathLike]) -> Dict[str, int]: + ids = {} + with open(file_path) as f: + for i, line in enumerate(f): + ids[line.strip()] = i + return ids + + +def save_label_ids(label_ids: Dict[str, int], file_path: Path) -> None: + """ + Saves label ids map to a file. In each line of a file one label is saved. Labels are saved in the order of + increasing of their ids. + + Args: + label_ids: label id dictionary. 
Pad label has to have id ``0`` + file_path: path to a file where labels will be saved + """ + file_path.parent.mkdir(parents=True, exist_ok=True) + with file_path.open('w') as out: + labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1])) + out.write('\n'.join(labels)) + + +def raise_not_equal_labels_error( + first_labels: Dict[str, int], second_labels: Dict[str, int], first_labels_desc: str, second_labels_desc: str +) -> None: + """ + A helper function for raising comprehensible error if labels from 2 sources are different. + Such sources may include: + - labels stored in .nemo checkpoint + - labels stored in tarred dataset + - labels passed in config parameters ``model.common_dataset_parameters.{punct_label_ids,capit_label_ids}`` + - labels from files passed in config parameters ``model.class_labels.{punct_labels_file,capit_labels_file}`` + - labels in attributes ``PunctuationCapitalizationModel.{punct_label_ids,capit_label_ids}`` + - any other source + This function helps to detect configuration early and give error messages that are easy to interpret. + Call this function if ``first_labels != second_labels``. + + Args: + first_labels: first dictionary with labels + second_labels: second dictionary with labels + first_labels_desc: a description of first labels + second_labels_desc: a description of second labels + """ + missing_in_first = {k: second_labels[k] for k in set(second_labels) - set(first_labels)} + missing_in_second = {k: first_labels[k] for k in set(first_labels) - set(second_labels)} + not_equal = { + k: {'FIRST LABELS': first_labels[k], 'SECOND LABELS': second_labels[k]} + for k in set(first_labels) & set(second_labels) + if first_labels[k] != second_labels[k] + } + msg = f"{first_labels_desc} (FIRST LABELS) are not equal to {second_labels_desc} (SECOND LABELS)." + if len(missing_in_first) > 0: + msg += f" Number of SECOND LABELS missing in the FIRST LABELS: {len(missing_in_first)}." 
+ if len(missing_in_second) > 0: + msg += f" Number of FIRST LABELS missing in the SECOND LABELS: {len(missing_in_second)}." + if len(not_equal) > 0: + msg += f" Number of labels which are not equal: {len(not_equal)}." + if len(missing_in_first) > 0: + msg += ( + f" Several examples of missing SECONDS LABELS in the FIRST LABELS: " + f"{dict(list(missing_in_first.items())[:3])}." + ) + if len(missing_in_second) > 0: + msg += ( + f" Several examples of missing FIRST LABELS in the SECOND LABELS: " + f"{dict(list(missing_in_second.items())[:3])}." + ) + if len(not_equal) > 0: + msg += f" Several examples of labels which are not equal: {dict(list(not_equal.items())[:3])}" + raise ValueError(msg) + + +def pad(vectors: List[ArrayLike], length: int, value: Union[int, float, bool]) -> ArrayLike: + """ + Pad vectors to length ``length`` and then stack. + Args: + vectors: a list of 1D arrays. Arrays to pad and stack + length: a length of padded sequence. Has to be greater or equal to the maximum length of an element of + ``vectors``. + value: a value used for padding + + Returns: + an array of padded vectors + """ + result = [] + for v in vectors: + result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)])) + return np.stack(result) class BertPunctuationCapitalizationDataset(Dataset): """ - Creates dataset to use during training for punctuaion and capitalization tasks with a pretrained model. - For dataset to use during inference without labels, see BertPunctuationCapitalizationInferDataset. + A dataset to use during training for punctuation and capitalization tasks. + For inference, you will need + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`. 
+ For huge datasets which cannot be loaded into memory simultaneously use + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`. Args: - text_file: file to sequences, each line should a sentence, no header. - label_file: file to labels, each line corresponds to word labels for a sentence in the text_file. No header. - max_seq_length: max sequence length minus 2 for [CLS] and [SEP] - tokenizer: such as AutoTokenizer - num_samples: number of samples you want to use for the dataset. - If -1, use all dataset. Useful for testing. - pad_label: pad value use for labels. - by default, it's the neutral label. - punct_label_ids and capit_label_ids (dict): - dict to map labels to label ids. - Starts with pad_label->0 and then increases in alphabetical order - For dev set use label_ids generated during training to support - cases when not all labels are present in the dev set. - For training set label_ids should be None or loaded from cache - ignore_extra_tokens: whether to ignore extra tokens in the loss_mask - ignore_start_end: whether to ignore bos and eos tokens in the loss_mask - use_cache: whether to use processed data cache or not - get_label_frequencies: whether to generate label frequencies - punct_label_ids_file and capit_label_ids_file: name of the files to save in .nemo + text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text + without punctuation and capitalization + labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word + labels for a sentence in the ``text_file``. Labels have to follow format described in this section of + documentation :ref:`NeMo Data Format<nemo-data-format-label>`. + max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes for [CLS] + and [SEP] tokens. 
Sequences which are too long will be clipped by removal of tokens from the end of the + sequence. + tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``unk_id``, ``sep_id``, ``bos_id``, + ``eos_id``. + num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the + dataset. If ``-1``, use all dataset. Useful for testing. + tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including + paddings and special tokens ([CLS], [SEP], [UNK]). This class :meth:`__getitem__` method returns not + samples but ready batches. Number of samples in a batch is adjusted for input sequences lengths. If input + sequences are short, then a batch will contain more samples. Before packing into batches, samples are + sorted by number of tokens they contain. Sorting allows to reduce number of pad tokens in a batch + significantly. Regular PyTorch data loader shuffling will only permute batches with changing their content. + Proper shuffling is achieved via calling method :meth:`repack_batches_with_shuffle` every epoch. + pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral + label both for punctuation and capitalization. + punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set, + use label ids generated during training to support cases when not all labels are present in the dev set. + For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache. + capit_label_ids (:obj:`Dict[str, int]`, `optional`): same ``punct_label_ids`` for capitalization labels. + ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on + tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized + into ``['token', 'ization']``. 
If ``ignore_extra_tokens=True``, loss mask for the word is + ``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``. + ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens + in the loss_mask. + use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features or not. If + pickled features does not exist and ``use_cache=True``, then pickled features will be created. Pickled + features are looked for and stored in ``cache_dir``. Pickled features include input ids, subtokens mask + (mask of first tokens in words), encoded punctuation and capitalization labels, label ids. Features + creation consumes considerable time and this ``use_cache=True`` significantly speeds up training starting. + + .. warning:: + If you spawned more then 1 processes BEFORE dataset creation, then the ``use_cache`` parameter + has to be ``True``. In PyTorch Lightning spawning is performed when `Trainer.fit() + <https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#fit>`_ or + `Trainer.test() <https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#test>`_ + are called. + cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features) + is stored. By default, ``text_file`` parent directory is used. This parameter is useful if dataset + directory is read-only and you wish to pickle features. In such a case specify a path to directory which + allows writing in ``cache_dir`` parameter. + get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label + frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If + ``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir`` directory. + label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies + are saved. 
By default the ``text_file`` parent directory is used.
Useful for creation of tarred dataset + batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in + batch creation (stacking and padding). Useful for creation of tarred dataset """ @property @@ -232,146 +873,101 @@ def output_types(self) -> Optional[Dict[str, NeuralType]]: def __init__( self, - text_file: str, - label_file: str, + text_file: Union[str, os.PathLike], + labels_file: Union[str, os.PathLike], max_seq_length: int, tokenizer: TokenizerSpec, num_samples: int = -1, + tokens_in_batch: int = 5000, pad_label: str = 'O', - punct_label_ids: Dict[str, int] = None, - capit_label_ids: Dict[str, int] = None, + punct_label_ids: Optional[Dict[str, int]] = None, + capit_label_ids: Optional[Dict[str, int]] = None, ignore_extra_tokens: bool = False, - ignore_start_end: bool = False, + ignore_start_end: bool = True, use_cache: bool = True, + cache_dir: Optional[Union[str, os.PathLike]] = None, get_label_frequencies: bool = False, - punct_label_ids_file: str = 'punct_label_ids.csv', - capit_label_ids_file: str = 'capit_label_ids.csv', - ): + label_info_save_dir: Optional[Union[str, os.PathLike]] = None, + punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None, + capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None, + add_masks_and_segment_ids_to_batch: bool = True, + verbose: bool = True, + n_jobs: Optional[int] = 0, + tokenization_progress_queue: Optional[mp.Queue] = None, + batch_mark_up_progress_queue: Optional[mp.Queue] = None, + batch_building_progress_queue: Optional[mp.Queue] = None, + ) -> None: """ Initializes BertPunctuationCapitalizationDataset. """ - - if not (os.path.exists(text_file) and os.path.exists(label_file)): - raise FileNotFoundError( - f'{text_file} or {label_file} not found. The data should be splitted into 2 files: text.txt and \ - labels.txt. Each line of the text.txt file contains text sequences, where words are separated with \ - spaces. 
The labels.txt file contains corresponding labels for each word in text.txt, the labels are \ - separated with spaces. Each line of the files should follow the format: \ - [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \ - [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).' - ) - - # Cache features - data_dir = os.path.dirname(text_file) - filename = os.path.basename(text_file) - - if not filename.endswith('.txt'): - raise ValueError("{text_file} should have extension .txt") - - filename = filename[:-4] - vocab_size = getattr(tokenizer, "vocab_size", 0) - features_pkl = os.path.join( - data_dir, - "cached_{}_{}_{}_{}_{}".format( - filename, tokenizer.name, str(max_seq_length), str(vocab_size), str(num_samples) - ), + self._check_constructor_parameters( + text_file, + labels_file, + punct_label_ids, + capit_label_ids, + punct_label_vocab_file, + capit_label_vocab_file, + num_samples, + use_cache, ) - - self.punct_label_ids_file = os.path.join(data_dir, punct_label_ids_file) - self.capit_label_ids_file = os.path.join(data_dir, capit_label_ids_file) + if punct_label_vocab_file is not None: + punct_label_vocab_file = Path(punct_label_vocab_file).expanduser() + punct_label_ids = load_label_ids(punct_label_vocab_file) + if capit_label_vocab_file is not None: + capit_label_vocab_file = Path(capit_label_vocab_file).expanduser() + capit_label_ids = load_label_ids(capit_label_vocab_file) + text_file, labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser() + if label_info_save_dir is None: + self.label_info_save_dir = text_file.parent + else: + self.label_info_save_dir = Path(label_info_save_dir).expanduser() + + self.tokens_in_batch = tokens_in_batch + self.tokenizer = tokenizer + self.pad_label = pad_label + self.ignore_extra_tokens = ignore_extra_tokens + self.ignore_start_end = ignore_start_end + self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch + self.verbose = verbose + self.batch_mark_up_progress_queue = 
batch_mark_up_progress_queue + self.batch_building_progress_queue = batch_building_progress_queue master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0 - cache_files_exist = ( - os.path.exists(features_pkl) - and os.path.exists(self.punct_label_ids_file) - and os.path.exists(self.capit_label_ids_file) - ) + features_pkl = self._get_path_to_pkl_features(text_file, cache_dir, max_seq_length, num_samples) features = None - if master_device and not (cache_files_exist and use_cache): - if num_samples == 0: - raise ValueError("num_samples has to be positive", num_samples) - logging.info(f'Processing {text_file}') - with open(text_file, 'r') as f: - text_lines = f.readlines() - - # Collect all possible labels - punct_unique_labels = set() - capit_unique_labels = set() - punct_labels_lines = [] - capit_labels_lines = [] - with open(label_file, 'r') as f: - for line in f: - line = line.strip().split() - - # extract punctuation and capitalization labels - punct_line, capit_line = zip(*line) - punct_labels_lines.append(punct_line) - capit_labels_lines.append(capit_line) - - punct_unique_labels.update(punct_line) - capit_unique_labels.update(capit_line) - - if len(punct_labels_lines) != len(text_lines): - raise ValueError("Labels file should contain labels for every word") - - dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines)) - - if num_samples > 0: - dataset = dataset[:num_samples] - - dataset = list(zip(*dataset)) - text_lines = dataset[0] - punct_labels_lines = dataset[1] - capit_labels_lines = dataset[2] - - # for dev/test sets use label mapping from training set + if master_device and not (features_pkl.is_file() and use_cache): + if verbose: + logging.info(f'Processing {text_file}') + res = self._read_dataset(text_file, labels_file, num_samples) + text_lines, punct_label_lines, capit_label_lines, punct_unique_labels, capit_unique_labels = res if punct_label_ids: - if len(punct_label_ids) != 
len(punct_unique_labels): - logging.info( - 'Not all labels from the specified' - + 'label_ids dictionary are present in the' - + 'current dataset. Using the provided' - + 'label_ids dictionary.' - ) - else: - logging.info('Using the provided label_ids dictionary.') + self._check_label_ids_vs_unique_labels( + punct_label_ids, punct_unique_labels, 'punct', 'punctuation', labels_file + ) else: - logging.info( - 'Creating a new label to label_id dictionary.' - + ' It\'s recommended to use label_ids generated' - + ' during training for dev/test sets to avoid' - + ' errors if some labels are not' - + ' present in the dev/test sets.' - + ' For training set label_ids should be None.' + punct_label_ids = create_label_ids(punct_unique_labels, self.pad_label) + if capit_label_ids: + self._check_label_ids_vs_unique_labels( + capit_label_ids, capit_unique_labels, 'capit', 'capitalzation', labels_file ) - - def create_label_ids(unique_labels, pad_label=pad_label): - label_ids = {pad_label: 0} - if pad_label in unique_labels: - unique_labels.remove(pad_label) - for label in sorted(unique_labels): - label_ids[label] = len(label_ids) - return label_ids - - punct_label_ids = create_label_ids(punct_unique_labels) - capit_label_ids = create_label_ids(capit_unique_labels) - - self._save_label_ids(punct_label_ids, self.punct_label_ids_file) - self._save_label_ids(capit_label_ids, self.capit_label_ids_file) - - features = get_features( + else: + capit_label_ids = create_label_ids(capit_unique_labels, self.pad_label) + features = _get_features( text_lines, + punct_label_lines, + capit_label_lines, max_seq_length, - tokenizer, - pad_label=pad_label, - punct_labels_lines=punct_labels_lines, - capit_labels_lines=capit_labels_lines, + self.tokenizer, + pad_label=self.pad_label, punct_label_ids=punct_label_ids, capit_label_ids=capit_label_ids, - ignore_extra_tokens=ignore_extra_tokens, - ignore_start_end=ignore_start_end, + verbose=self.verbose, + progress_queue=tokenization_progress_queue, + 
n_jobs=n_jobs, ) - - pickle.dump(features, open(features_pkl, "wb")) - logging.info(f'Features saved to {features_pkl}') + if use_cache: + features_pkl.parent.mkdir(parents=True, exist_ok=True) + pickle.dump(tuple(list(features) + [punct_label_ids, capit_label_ids]), open(features_pkl, "wb")) + if self.verbose: + logging.info(f'Features saved to {features_pkl}') # wait until the master process writes to the processed data files if torch.distributed.is_initialized(): @@ -379,309 +975,489 @@ def create_label_ids(unique_labels, pad_label=pad_label): if features is None: features = pickle.load(open(features_pkl, 'rb')) - logging.info(f'Features restored from {features_pkl}') - - self.all_input_ids = features[0] - self.all_segment_ids = features[1] - self.all_input_mask = features[2] - self.all_subtokens_mask = features[3] - self.all_loss_mask = features[4] - self.punct_all_labels = features[5] - self.capit_all_labels = features[6] - self.punct_label_ids = features[7] - self.capit_label_ids = features[8] + li = features[-2:] + self._check_label_ids_loaded_from_pkl( + punct_label_ids, capit_label_ids, *li, punct_label_vocab_file, capit_label_vocab_file, features_pkl + ) + punct_label_ids, capit_label_ids = li[-2], li[-1] + if tokenization_progress_queue is not None: + tokenization_progress_queue.put(len(features[0])) + if self.verbose: + logging.info(f'Features restored from {features_pkl}') + features = features[:-2] + + self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels = features + self.punct_label_ids, self.capit_label_ids = punct_label_ids, capit_label_ids + self.batches = self._pack_into_batches( + self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels + ) if get_label_frequencies: - self.punct_label_frequencies = self._calculate_label_frequencies(self.punct_all_labels, data_dir, 'punct') - self.capit_label_frequencies = self._calculate_label_frequencies(self.capit_all_labels, data_dir, 'capit') + 
self.punct_label_frequencies = self._calculate_and_save_label_frequencies(self.punct_labels, 'punct') + self.capit_label_frequencies = self._calculate_and_save_label_frequencies(self.capit_labels, 'capit') + + def _get_path_to_pkl_features( + self, text_file: Path, cache_dir: Optional[Union[str, os.PathLike]], max_seq_length: int, num_samples: int + ) -> Path: + if cache_dir is None: + cache_dir = text_file.parent + else: + cache_dir = Path(cache_dir).expanduser() + vocab_size = getattr(self.tokenizer, "vocab_size", 0) + features_pkl = cache_dir / "cached.{}.{}.max_seq_length{}.vocab{}.{}.punctuation_capitalization.pkl".format( + text_file.stem, + self.tokenizer.name, + max_seq_length, + vocab_size, + f'num_samples{num_samples}' if num_samples > 0 else 'all_samples', + ) + return features_pkl + + @staticmethod + def _check_constructor_parameters( + text_file: Union[str, os.PathLike], + labels_file: Union[str, os.PathLike], + punct_label_ids: Optional[Dict[str, int]], + capit_label_ids: Optional[Dict[str, int]], + punct_label_vocab_file: Union[str, os.PathLike], + capit_label_vocab_file: Union[str, os.PathLike], + num_samples: int, + use_cache: bool, + ) -> None: + if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and not use_cache: + raise ValueError( + f"If you already created process group and the world size is greater than 1, then `use_cache` " + f"parameter has to `True`. Only master process prepares features and if `use_cache=False`, then " + f"other processes will not be able to obtain features. Alternatively, you may set `use_cache=False` " + f"and set up data before spawning processes. Use `cache_dir` dataset directory with " + f"`text_file` and `labels_file` is read-only." + ) + if not (os.path.exists(text_file) and os.path.exists(labels_file)): + raise FileNotFoundError( + f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and' + f'labels.txt. 
Each line of the text.txt file contains text sequences, where words are separated with' + f'spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are' + f'separated with spaces. Each line of the files should follow the format:\n' + f' [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and ' + f' [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).' + ) + if not str(text_file).endswith('.txt'): + raise ValueError( + f"Parameter `text_file` has to be path to a file with .txt extension, whereas `text_file={text_file}`" + ) + if not str(labels_file).endswith('.txt'): + raise ValueError( + f"Parameter `labels_file` has to be path to a file with .txt extension, whereas " + f"`labels_file={labels_file}`" + ) + if punct_label_ids is not None and punct_label_vocab_file is not None: + punct_label_vocab_file = Path(punct_label_vocab_file).expanduser() + file_punct_label_ids = load_label_ids(punct_label_vocab_file) + if file_punct_label_ids != punct_label_ids: + raise_not_equal_labels_error( + first_labels=punct_label_ids, + second_labels=file_punct_label_ids, + first_labels_desc='Punctuation labels passed to the `PunctuationCapitalizationDataset` ' + 'constructor in parameter `punct_label_ids`', + second_labels_desc=f'Punctuation labels loaded from file {punct_label_vocab_file} path to which ' + f'is passed in parameter `punct_label_vocab_file`', + ) + if capit_label_ids is not None and capit_label_vocab_file is not None: + capit_vocab_file = Path(capit_label_vocab_file).expanduser() + file_capit_label_ids = load_label_ids(capit_vocab_file) + if file_capit_label_ids != capit_label_ids: + raise_not_equal_labels_error( + first_labels=capit_label_ids, + second_labels=file_capit_label_ids, + first_labels_desc='Capitalization labels passed to the `PunctuationCapitalizationDataset` ' + 'constructor in parameter `capit_label_ids`', + second_labels_desc=f'Capitalization labels loaded from file {capit_label_vocab_file} path to ' + 
f'which is passed in parameter `capit_label_vocab_file`', + ) + if num_samples == 0: + raise ValueError( + f"Parameter `num_samples` has to be positive or negative whereas `num_samples={num_samples}`. " + f"Negative `num_samples` is for using all samples in a dataset." + ) - def _calculate_label_frequencies(self, all_labels: List[int], data_dir: str, name: str) -> Dict[str, float]: - """ Calculates labels frequencies """ - merged_labels = itertools.chain.from_iterable(all_labels) - logging.info('Three most popular labels') - _, label_frequencies, _ = get_label_stats(merged_labels, data_dir + '/label_count_' + name + '.tsv') - return label_frequencies + @staticmethod + def _check_label_ids_loaded_from_pkl( + parameter_punct_label_ids: Dict[str, int], + parameter_capit_label_ids: Dict[str, int], + pkl_punct_label_ids: Any, + pkl_capit_label_ids: Any, + punct_label_vocab_file: Optional[Path], + capit_label_vocab_file: Optional[Path], + features_file: Path, + ) -> None: + if not isinstance(pkl_punct_label_ids, dict): + raise ValueError( + f"Punctuation label ids loaded from features file {features_file} has wrong type " + f"{type(pkl_punct_label_ids)}" + ) + if parameter_punct_label_ids is not None: + if parameter_punct_label_ids != pkl_punct_label_ids: + raise_not_equal_labels_error( + first_labels=parameter_punct_label_ids, + second_labels=pkl_punct_label_ids, + first_labels_desc="Punctuation labels passed in parameter `punct_label_ids`" + if punct_label_vocab_file is None + else f"Punctuation labels loaded from file {punct_label_vocab_file}", + second_labels_desc=f"Punctuation label ids loaded from features file {features_file}", + ) + if not isinstance(pkl_capit_label_ids, dict): + raise ValueError( + f"Capitalization label ids loaded from features file {features_file} has wrong type " + f"{type(pkl_capit_label_ids)}" + ) + if parameter_capit_label_ids is not None: + if parameter_capit_label_ids != pkl_capit_label_ids: + raise_not_equal_labels_error( + 
first_labels=parameter_capit_label_ids, + second_labels=pkl_capit_label_ids, + first_labels_desc="Capitalization labels passed in parameter `capit_label_ids`" + if capit_label_vocab_file is None + else f"Capitalization labels loaded from file {capit_label_vocab_file}", + second_labels_desc=f"Capitalization label ids loaded from features file {features_file}", + ) - def _save_label_ids(self, label_ids: Dict[str, int], filename: str) -> None: - """ Saves label ids map to a file """ - with open(filename, 'w') as out: - labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1])) - out.write('\n'.join(labels)) - logging.info(f'Labels: {label_ids}') - logging.info(f'Labels mapping saved to : {out.name}') - - def __len__(self): - return len(self.all_input_ids) - - def __getitem__(self, idx): - return ( - np.array(self.all_input_ids[idx]), - np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.long), - np.array(self.all_subtokens_mask[idx]), - np.array(self.all_loss_mask[idx]), - np.array(self.punct_all_labels[idx]), - np.array(self.capit_all_labels[idx]), - ) + @staticmethod + def _check_label_ids_vs_unique_labels( + label_ids: Dict[str, int], unique_labels: Set[str], label_type: str, task: str, label_file: Path + ) -> None: + if unique_labels - set(label_ids): + not_present_labels = list(unique_labels - set(label_ids)) + raise ValueError( + f"{len(not_present_labels)} {task} labels found in {label_file} are not present in " + f"`{label_type}_label_ids`. 
Examples of unexpected labels from {label_file}: {not_present_labels[:3]}" + ) + @staticmethod + def _read_dataset( + text_file: Path, labels_file: Path, num_samples: int + ) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...], Set[str], Set[str]]: + with open(text_file, 'r') as f: + text_lines = f.readlines() + punct_unique_labels, capit_unique_labels = set(), set() + punct_labels_lines, capit_labels_lines = [], [] + with labels_file.open() as f: + for i, line in enumerate(f): + pairs = line.split() + if not all([len(p) == 2 for p in pairs]): + raise ValueError( + f"Some label pairs are not pairs but have wrong length (!= 2) in line {i} in label file " + f"{labels_file}" + ) + words = text_lines[i].split() + if len(pairs) != len(words): + raise ValueError( + f"In line {i} in text file {text_file} number of words {len(words)} is not equal to the " + f"number of labels {len(pairs)} in labels file {labels_file}." + ) + punct_line, capit_line = zip(*pairs) + punct_labels_lines.append(punct_line) + capit_labels_lines.append(capit_line) + punct_unique_labels.update(punct_line) + capit_unique_labels.update(capit_line) + + if len(punct_labels_lines) != len(text_lines): + raise ValueError( + f"Number of text lines {len(text_lines)} in text file {text_file} is not equal to the number of lines " + f"{len(punct_labels_lines)} in labels file {labels_file}." + ) + dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines)) + if len(dataset) == 0: + raise ValueError(f"Dataset loaded from files {text_file} and {labels_file} is empty.") + if num_samples > 0: + dataset = dataset[:num_samples] + text_lines, punct_labels_lines, capit_labels_lines = zip(*dataset) + return text_lines, punct_labels_lines, capit_labels_lines, punct_unique_labels, capit_unique_labels + + def _mark_up_batches(self, input_ids: List[ArrayLike]) -> Tuple[List[int], List[int], List[int]]: + """ + Computes indices of first samples in batch, batch sizes, seq lengths for batches. 
``input_ids`` has to be + sorted by number of tokens in ascending order. -def _get_subtokens_and_subtokens_mask(query: str, tokenizer: TokenizerSpec) -> Tuple[List[str], List[int]]: - """ - Tokenizes input query into subtokens and creates subtokens mask. Subtokens mask is an array of the same length as - subtokens array and contains zeros and ones in which. If element of mask equals 1, then corresponding subtoken in - subtokens array is first subtoken in some word - Args: - query: a string that will be tokenized - tokenizer: an instance of tokenizer - Returns: - subtokens: list of subtokens - subtokens_mask: list of ints - """ - words = query.strip().split() - subtokens = [] - subtokens_mask = [] - for j, word in enumerate(words): - word_tokens = tokenizer.text_to_tokens(word) - subtokens.extend(word_tokens) - subtokens_mask.append(1) - subtokens_mask.extend([0] * (len(word_tokens) - 1)) - return subtokens, subtokens_mask - - -def _check_max_seq_length_and_margin_and_step(max_seq_length: int, margin: int, step: int): - """ - Checks values of ``max_seq_length``, ``margin``, and ``step``. - Args: - max_seq_length: a segment length with ``[CLS]`` and ``[SEP]`` tokens - margin: a number of input tokens near edges of segments which are not used in punctuation and capitalization - prediction. - step: offset of consequent segments. - Returns: - None - """ - if max_seq_length < 3: - raise ValueError( - f"Parameter `max_seq_length={max_seq_length}` cannot be less than 3 because `max_seq_length` is a length " - f"of a segment with [CLS] and [SEP] tokens." - ) - if margin >= (max_seq_length - 2) // 2 and margin > 0 or margin < 0: - raise ValueError( - f"Parameter `margin` has to be not negative and less than `(max_seq_length - 2) // 2`. Don't forget about " - f"CLS and EOS tokens in the beginning and the end of segment. 
margin={margin}, " - f"max_seq_length={max_seq_length}" - ) - if step <= 0: - raise ValueError(f"Parameter `step` has to be positive whereas step={step}") - if step > max_seq_length - 2 - 2 * margin: - logging.warning( - f"Parameter step={step} is too big. It will be reduced to `min(max_seq_length, <maximum query length> + 2) " - f"- 2 - 2 * margin`." - ) + Batches are marked up with respect to following conditions: + - total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch`` + - batch size is evenly divisible by 8 (except for the last batch) + - seq length (elements of the third returned object) is evenly divisible by 8 + If ``self.batch_mark_up_progress_queue`` is not None, then the progress in mark up is reported via + ``self.batch_mark_up_progress_queue``. Otherwise, ``tqdm`` instance is created in this function. -def get_features_infer( - queries: List[str], - tokenizer: TokenizerSpec, - max_seq_length: int = 64, - step: Optional[int] = 8, - margin: Optional[int] = 16, -) -> Tuple[ - List[List[int]], List[List[int]], List[List[int]], List[List[int]], List[int], List[int], List[bool], List[bool], -]: - """ - Processes the data and returns features. + Args: + input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending + order - Args: - queries: text sequences - tokenizer: such as AutoTokenizer - max_seq_length: max sequence length minus 2 for [CLS] and [SEP] - step: relative shift of consequent segments into which long queries are split. Long queries are split into - segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are - tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is - tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. - margin: number of subtokens near edges of segments which are not used for punctuation and capitalization - prediction. 
The first segment does not have left margin and the last segment does not have right - margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, - ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments - ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], - ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions - computation, margins are removed. In the next list, subtokens which logits are not used for final - predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], - ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. + Returns: + batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch + batch_sizes: a list of numbers of samples in batches + batch_seq_lengths: a list of sequence lengths after padding for every batch + """ + batch_beginnings, batch_sizes, batch_seq_lengths = [], [], [] + current_max_length = 0 + start = 0 + if self.batch_mark_up_progress_queue is None: + inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query") + else: + inp_iterator = enumerate(input_ids) + progress_made = 0 + for i, inp in inp_iterator: + current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8) + if current_max_length * (i + 1 - start) > self.tokens_in_batch: + batch_size = (i - start) // 8 * 8 + if batch_size == 0: + if i > start: + batch_size = i - start + logging.warning( + f"Could not create batch with multiple of 8 size. Probably there is a too long sequence in " + f"the dataset. current_max_length={current_max_length}. Batch size will be reduced to " + f"{batch_size}. tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from " + f"{start} to {i - 1}." + ) + else: + logging.warning( + f"Input sequence number {i - 1} is too long. 
Could not fit it into batch with " + f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches." + ) + start = i + current_max_length = ceil(len(inp) / 8) * 8 + continue + seq_length = ceil(max([len(inp) for inp in input_ids[start : start + batch_size]]) / 8) * 8 + batch_beginnings.append(start) + batch_sizes.append(batch_size) + batch_seq_lengths.append(seq_length) + start += batch_size + current_max_length = ceil(max([len(inp) for inp in input_ids[start : i + 1]]) / 8) * 8 + if self.batch_mark_up_progress_queue is not None: + progress_made += 1 + if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD: + self.batch_mark_up_progress_queue.put(progress_made) + progress_made = 0 + if start < len(input_ids): + seq_length = ceil(max([len(inp) for inp in input_ids[start:]]) / 8) * 8 + batch_beginnings.append(start) + batch_sizes.append(len(input_ids) - start) + batch_seq_lengths.append(seq_length) + if self.batch_mark_up_progress_queue is not None: + self.batch_mark_up_progress_queue.put(progress_made) + assert sum(batch_sizes) == len(input_ids) + for i in range(len(batch_beginnings) - 1): + assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1] + assert batch_seq_lengths[i] >= max( + [len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]] + ) + return batch_beginnings, batch_sizes, batch_seq_lengths - Returns: - all_input_ids: list of input ids of all segments - all_segment_ids: token type ids of all segments - all_input_mask: attention mask to use for BERT model - all_subtokens_mask: masks out all subwords besides the first one - all_quantities_of_preceding_words: number of words in query preceding a segment. Used for joining - predictions from overlapping segments. 
- all_query_ids: index of a query to which segment belongs - all_is_first: is segment first segment in a query - all_is_last: is segment last segment in a query - """ - st = [] - stm = [] - sent_lengths = [] - for i, query in enumerate(queries): - subtokens, subtokens_mask = _get_subtokens_and_subtokens_mask(query, tokenizer) - sent_lengths.append(len(subtokens)) - st.append(subtokens) - stm.append(subtokens_mask) - _check_max_seq_length_and_margin_and_step(max_seq_length, margin, step) - if max_seq_length > max(sent_lengths) + 2: - max_seq_length = max(sent_lengths) + 2 - # If `max_seq_length` is greater than maximum length of input query, parameters ``margin`` and ``step`` are - # not used will not be used. - step = 1 - # Maximum number of word subtokens in segment. The first and the last tokens in segment are CLS and EOS - length = max_seq_length - 2 - else: - # Maximum number of word subtokens in segment. The first and the last tokens in segment are CLS and EOS - length = max_seq_length - 2 - step = min(length - margin * 2, step) - logging.info(f'Max length: {max_seq_length}') - get_stats(sent_lengths) - all_input_ids, all_segment_ids, all_subtokens_mask, all_input_mask, all_input_mask = [], [], [], [], [] - all_quantities_of_preceding_words, all_query_ids, all_is_first, all_is_last = [], [], [], [] - for q_i, query_st in enumerate(st): - q_inp_ids, q_segment_ids, q_subtokens_mask, q_inp_mask, q_quantities_of_preceding_words = [], [], [], [], [] - for i in range(0, max(len(query_st), length) - length + step, step): - subtokens = [tokenizer.cls_token] + query_st[i : i + length] + [tokenizer.sep_token] - q_inp_ids.append(tokenizer.tokens_to_ids(subtokens)) - q_segment_ids.append([0] * len(subtokens)) - q_subtokens_mask.append([0] + stm[q_i][i : i + length] + [0]) - q_inp_mask.append([1] * len(subtokens)) - q_quantities_of_preceding_words.append(np.count_nonzero(stm[q_i][:i])) - all_input_ids.append(q_inp_ids) - all_segment_ids.append(q_segment_ids) - 
all_subtokens_mask.append(q_subtokens_mask) - all_input_mask.append(q_inp_mask) - all_quantities_of_preceding_words.append(q_quantities_of_preceding_words) - all_query_ids.append([q_i] * len(q_inp_ids)) - all_is_first.append([True] + [False] * (len(q_inp_ids) - 1)) - all_is_last.append([False] * (len(q_inp_ids) - 1) + [True]) - return ( - list(itertools.chain(*all_input_ids)), - list(itertools.chain(*all_segment_ids)), - list(itertools.chain(*all_input_mask)), - list(itertools.chain(*all_subtokens_mask)), - list(itertools.chain(*all_quantities_of_preceding_words)), - list(itertools.chain(*all_query_ids)), - list(itertools.chain(*all_is_first)), - list(itertools.chain(*all_is_last)), - ) + def _pack_into_batches( + self, + input_ids: List[ArrayLike], + subtokens_mask: List[ArrayLike], + punct_labels: List[ArrayLike], + capit_labels: List[ArrayLike], + ) -> List[Dict[str, ArrayLike]]: + """ + Shuffle input sequences, sort them by number of tokens, pad, and pack into batches which satisfy following + conditions: + - total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch`` + - batch size is evenly divisible by 8 (except for the last batch) + - seq length (elements of the third returned object) is evenly divisible by 8 + Created batches are shuffled before returning. + + If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and + ``'input_mask'`` are added to the batch. + + If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to + ``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in ``pack_into_batches`` + method. 
+ + Args: + input_ids: a list of 1D int32 arrays which contain token ids of dataset source + subtokens_mask: a list of 1D boolean arrays which elements are ``True`` if corresponding token is the + first token in some word + punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels + capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels + + Returns: + a list of batches. Each batch is a dictionary with items: + - ``'input_ids'``: a ``np.int32`` numpy array; + - ``'subtokens_mask'``: a boolean numpy array; + - ``'punct_labels'``: a ``np.int32`` numpy array; + - ``'capit_labels'``: a ``np.int32`` numpy array. + If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contain items + - ``'segment_ids'``: a ``np.int8`` numpy array; + - ``'input_mask'``: a boolean numpy array; + - ``'loss_mask'``: a boolean numpy array. + + The values of a batch dictionary are numpy arrays of identical shape. + """ + zipped = list(zip(input_ids, subtokens_mask, punct_labels, capit_labels)) + random.shuffle(zipped) + input_ids, subtokens_mask, punct_labels, capit_labels = zip(*sorted(zipped, key=lambda x: x[0].shape[0])) + batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids) + batches = [] + if self.batch_building_progress_queue is None: + inp_iterator = tqdm( + zip(batch_beginnings, batch_sizes, batch_seq_lengths), + total=len(batch_beginnings), + desc="Batch building", + unit="batch", + ) + else: + # In this case we report number of queries not number of batches + inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths) + progress_made = 0 + for start, size, length in inp_iterator: + batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id) + batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False) + batch = { + "input_ids": batch_input_ids, + "subtokens_mask": batch_subtokens_mask, + "punct_labels": pad( + 
punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label] + ).astype(np.int64), + "capit_labels": pad( + capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label] + ).astype(np.int64), + } + if self.add_masks_and_segment_ids_to_batch: + batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids( + batch_input_ids, + batch_subtokens_mask, + self.tokenizer.pad_id, + self.tokenizer.cls_id, + self.tokenizer.sep_id, + self.ignore_start_end, + self.ignore_extra_tokens, + ) + batch['segment_ids'] = batch_segment_ids + batch['input_mask'] = batch_input_mask + batch['loss_mask'] = batch_loss_mask + batches.append(batch) + if self.batch_building_progress_queue is not None: + progress_made += size + if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD: + self.batch_building_progress_queue.put(progress_made) + progress_made = 0 + if self.batch_building_progress_queue is not None: + self.batch_building_progress_queue.put(progress_made) + random.shuffle(batches) + return batches + + def repack_batches_with_shuffle(self) -> None: + """A function for proper shuffling of a dataset. 
Pytorch data loader shuffing will only permute batches.""" + logging.info("Shuffling training dataset") + self.batches = self._pack_into_batches( + self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels + ) + def _calculate_and_save_label_frequencies(self, all_labels: List[ArrayLike], name: str) -> Dict[str, float]: + """Calculates and saves labels frequencies in :attr:`label_info_save_dir`.""" + merged_labels = itertools.chain.from_iterable(all_labels) + if self.verbose: + logging.info('Three most popular labels') + self.label_info_save_dir.mkdir(parents=True, exist_ok=True) + _, label_frequencies, _ = get_label_stats( + merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv') + ) + return label_frequencies -class BertPunctuationCapitalizationInferDataset(Dataset): - """ - Creates dataset to use during inference for punctuation and capitalization tasks with a pretrained model. - For dataset to use during training with labels, see BertPunctuationCapitalizationDataset. + def save_labels_and_get_file_paths( + self, punct_labels_file_name: str, capit_labels_file_name: str + ) -> Tuple[Path, Path]: + """ + Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for + ``.nemo`` checkpoint creation. - Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments - which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization - including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift - between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near - borders of segments which have only one side context. 
+ The signatures of this method and the signature of the method + :meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths` + must be identical. - Args: - queries: list of sequences. - tokenizer: such as AutoTokenizer - max_seq_length: max sequence length minus 2 for [CLS] and [SEP] - step: relative shift of consequent segments into which long queries are split. Long queries are split into - segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are - tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is - tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. - margin: number of subtokens in the beginning and the end of segments which are not used for prediction - computation. The first segment does not have left margin and the last segment does not have right - margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, - ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments - ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], - ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions - computation, margins are removed. In the next list, subtokens which logits are not used for final - predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], - ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. - """ + Args: + punct_labels_file_name (:obj:`str`): a name of a punctuation labels file + capit_labels_file_name (:obj:`str`): a name of a capitalization labels file - @property - def output_types(self) -> Optional[Dict[str, NeuralType]]: - """Returns definitions of module output ports. 
- - input_ids: ids of word subtokens encoded using tokenizer - segment_ids: an array of zeros - input_mask: attention mask. Zeros if input is padding. - subtoken_mask: a mask used for retrieving predictions for words. An element equals ``1`` if corresponding - token is the first token in some word and zero otherwise. For example, if input query - "language processing" is tokenized into ["[CLS]", "language", "process", "ing", "SEP"], then - ``subtokens_mask`` will be [0, 1, 1, 0, 0]. - quantities_of_preceding_words: number of words preceding a segment in a query. It is used for uniting - predictions from different segments if such segments overlap. For example, if query "hello john" is - tokenized into segments ``[['hell', 'o'], ['john']]``, then ``quantities_of_preceding_words=[0, 1]``. - query_ids: ids of queries to which segments belong. For example, if ``queries=["foo", "bar"]`` are - segmented into ``[[['[CLS]', 'f', 'o', '[SEP]'], ['[CLS]', 'o', 'o', '[SEP]']], - [['[CLS]', 'b', 'a', '[SEP]'], ['[CLS]', 'a', 'r', '[SEP]']]]``, then for batch - [['[CLS]', 'o', 'o', '[SEP]'], ['[CLS]', 'b', 'a', '[SEP]'], ['[CLS]', 'a', 'r', '[SEP]']] - ``query_ids=[0, 1, 1]``. - is_first: is segment the first segment in query. The left margin of the first segment in a query is not - removed and this parameter is used to identify first segments. - is_last: is segment the last segment in query. The right margin of the last segment in a query is not - removed and this parameter is used to identify last segments. 
+ Returns: + :obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing: + - :obj:`pathlib.Path`: a path to the saved punctuation labels file + - :obj:`pathlib.Path`: a path to the saved capitalization labels file """ - return { - 'input_ids': NeuralType(('B', 'T'), ChannelType()), - 'segment_ids': NeuralType(('B', 'T'), ChannelType()), - 'input_mask': NeuralType(('B', 'T'), MaskType()), - 'subtokens_mask': NeuralType(('B', 'T'), MaskType()), - 'quantities_of_preceding_words': NeuralType(('B',), Index()), - 'query_ids': NeuralType(('B',), Index()), - 'is_first': NeuralType(('B',), BoolType()), - 'is_last': NeuralType(('B',), BoolType()), - } - - def __init__( - self, queries: List[str], tokenizer: TokenizerSpec, max_seq_length: int = 128, step: int = 32, margin: int = 16 - ): - features = get_features_infer( - queries=queries, max_seq_length=max_seq_length, tokenizer=tokenizer, step=step, margin=margin - ) - self.all_input_ids: List[List[int]] = features[0] - self.all_segment_ids: List[List[int]] = features[1] - self.all_input_mask: List[List[int]] = features[2] - self.all_subtokens_mask: List[List[int]] = features[3] - self.all_quantities_of_preceding_words: List[int] = features[4] - self.all_query_ids: List[int] = features[5] - self.all_is_first: List[bool] = features[6] - self.all_is_last: List[bool] = features[7] + nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT + punct_labels_file = nemo_dir / punct_labels_file_name + capit_labels_file = nemo_dir / capit_labels_file_name + save_label_ids(self.punct_label_ids, punct_labels_file) + save_label_ids(self.capit_label_ids, capit_labels_file) + return punct_labels_file, capit_labels_file def __len__(self) -> int: - return len(self.all_input_ids) - - def collate_fn( - self, batch: List[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]] - ) -> Tuple[ - torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[int], Tuple[int], Tuple[bool], Tuple[bool] - ]: - 
inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch) - return ( - pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0), - pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0), - pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0), - pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0), - n_preceding, - query_ids, - is_first, - is_last, - ) + return len(self.batches) - def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]: - return ( - np.array(self.all_input_ids[idx]), - np.array(self.all_segment_ids[idx]), - np.array(self.all_input_mask[idx], dtype=np.float32), - np.array(self.all_subtokens_mask[idx]), - self.all_quantities_of_preceding_words[idx], - self.all_query_ids[idx], - self.all_is_first[idx], - self.all_is_last[idx], - ) + def collate_fn(self, batches: List[Dict[str, ArrayLike]]) -> Dict[str, torch.Tensor]: + """ + Return zeroth batch from ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``, + ``'capit_labels'`` to types supported by + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`. + All output tensors have shape ``[Batch, Time]``. + + .. warning:: + A ``batch_size`` parameter of a PyTorch data loader and sampler has to be ``1``. 
+ + Args: + batches (:obj:`List[Dict[str, ArrayLike]]`): a list containing 1 batch passed for collating + + Returns: + :obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch + items see method :meth:`__getitem__`): + + - ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor, + - ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor, + - ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor, + - ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor, + - ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor, + - ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor, + - ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor. + """ + batch = {k: torch.as_tensor(v) for k, v in batches[0].items()} + batch['segment_ids'] = batch['segment_ids'].int() + batch['punct_labels'] = batch['punct_labels'].long() + batch['capit_labels'] = batch['capit_labels'].long() + return batch + + def __getitem__(self, idx: int) -> Dict[str, ArrayLike]: + """ + Return a batch with index ``idx``. The values of a batch dictionary are numpy arrays of identical shapes + ``[Batch, Time]``. Labels are identical for all tokens in a word. For example, if + + - word ``'Tokenization'`` is tokenized into tokens ``['token', 'ization']``, + - it is followed by comma, + + then punctuation labels are ``[',', ',']`` and capitalization labels are ``['U', 'U']`` (``'U'`` is a label + for words which start with upper case character). 
+ + Args: + idx: an index of returned batch + + Returns: + :obj:`Dict[str, ArrayLike]`: a dictionary with items: + + - ``'input_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded tokens, + - ``'subtokens_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if they + correspond to first token in a word, + - ``'punct_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded punctuation + labels, + - ``'capit_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded capitalization + labels. + - ``'segment_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int8` array filled with zeros (BERT token types + in HuggingFace terminology) (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this + items is missing), + - ``'input_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if corresponding + token is not a padding token (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this + items is missing), + - ``'loss_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if loss is + computed for corresponding token. See more in description of constructor parameters + ``ignore_start_end``, ``ignore_extra_tokens`` (if ``self.add_masks_and_segment_ids_to_batch`` is + ``False``, then this items is missing). + """ + return self.batches[idx] diff --git a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_infer_dataset.py b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_infer_dataset.py new file mode 100644 --- /dev/null +++ b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_infer_dataset.py @@ -0,0 +1,334 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +from torch.nn.utils.rnn import pad_sequence + +from nemo.collections.common.tokenizers import TokenizerSpec +from nemo.collections.nlp.data import get_stats +from nemo.core import Dataset +from nemo.core.neural_types import ChannelType, Index, MaskType, NeuralType +from nemo.core.neural_types.elements import BoolType +from nemo.utils import logging + + +def get_features_infer( + queries: List[str], + tokenizer: TokenizerSpec, + max_seq_length: int = 64, + step: Optional[int] = 8, + margin: Optional[int] = 16, +) -> Tuple[ + List[List[int]], List[List[int]], List[List[int]], List[List[int]], List[int], List[int], List[bool], List[bool], +]: + """ + Processes the data and returns features. + + Args: + queries: text sequences + tokenizer: such as AutoTokenizer + max_seq_length: max sequence length minus 2 for [CLS] and [SEP] + step: relative shift of consequent segments into which long queries are split. Long queries are split into + segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are + tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is + tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. + margin: number of subtokens near edges of segments which are not used for punctuation and capitalization + prediction. The first segment does not have left margin and the last segment does not have right + margin. 
For example, if input sequence is tokenized into characters, ``max_seq_length=5``, + ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments + ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], + ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions + computation, margins are removed. In the next list, subtokens which logits are not used for final + predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], + ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. + + Returns: + all_input_ids: list of input ids of all segments + all_segment_ids: token type ids of all segments + all_input_mask: attention mask to use for BERT model + all_subtokens_mask: masks out all subwords besides the first one + all_quantities_of_preceding_words: number of words in query preceding a segment. Used for joining + predictions from overlapping segments. + all_query_ids: index of a query to which segment belongs + all_is_first: is segment first segment in a query + all_is_last: is segment last segment in a query + """ + st = [] + stm = [] + sent_lengths = [] + for i, query in enumerate(queries): + subtokens, subtokens_mask = _get_subtokens_and_subtokens_mask(query, tokenizer) + sent_lengths.append(len(subtokens)) + st.append(subtokens) + stm.append(subtokens_mask) + _check_max_seq_length_and_margin_and_step(max_seq_length, margin, step) + if max_seq_length > max(sent_lengths) + 2: + max_seq_length = max(sent_lengths) + 2 + # If `max_seq_length` is greater than maximum length of input query, parameters ``margin`` and ``step`` are + # not used will not be used. + step = 1 + # Maximum number of word subtokens in segment. The first and the last tokens in segment are CLS and EOS + length = max_seq_length - 2 + else: + # Maximum number of word subtokens in segment. 
The first and the last tokens in segment are CLS and EOS + length = max_seq_length - 2 + step = min(length - margin * 2, step) + logging.info(f'Max length: {max_seq_length}') + get_stats(sent_lengths) + all_input_ids, all_segment_ids, all_subtokens_mask, all_input_mask, all_input_mask = [], [], [], [], [] + all_quantities_of_preceding_words, all_query_ids, all_is_first, all_is_last = [], [], [], [] + for q_i, query_st in enumerate(st): + q_inp_ids, q_segment_ids, q_subtokens_mask, q_inp_mask, q_quantities_of_preceding_words = [], [], [], [], [] + for i in range(0, max(len(query_st), length) - length + step, step): + subtokens = [tokenizer.cls_token] + query_st[i : i + length] + [tokenizer.sep_token] + q_inp_ids.append(tokenizer.tokens_to_ids(subtokens)) + q_segment_ids.append([0] * len(subtokens)) + q_subtokens_mask.append([False] + stm[q_i][i : i + length] + [False]) + q_inp_mask.append([True] * len(subtokens)) + q_quantities_of_preceding_words.append(np.count_nonzero(stm[q_i][:i])) + all_input_ids.append(q_inp_ids) + all_segment_ids.append(q_segment_ids) + all_subtokens_mask.append(q_subtokens_mask) + all_input_mask.append(q_inp_mask) + all_quantities_of_preceding_words.append(q_quantities_of_preceding_words) + all_query_ids.append([q_i] * len(q_inp_ids)) + all_is_first.append([True] + [False] * (len(q_inp_ids) - 1)) + all_is_last.append([False] * (len(q_inp_ids) - 1) + [True]) + return ( + list(itertools.chain(*all_input_ids)), + list(itertools.chain(*all_segment_ids)), + list(itertools.chain(*all_input_mask)), + list(itertools.chain(*all_subtokens_mask)), + list(itertools.chain(*all_quantities_of_preceding_words)), + list(itertools.chain(*all_query_ids)), + list(itertools.chain(*all_is_first)), + list(itertools.chain(*all_is_last)), + ) + + +def _check_max_seq_length_and_margin_and_step(max_seq_length: int, margin: int, step: int): + """ + Checks values of ``max_seq_length``, ``margin``, and ``step``. 
+ Args: + max_seq_length: a segment length with ``[CLS]`` and ``[SEP]`` tokens + margin: a number of input tokens near edges of segments which are not used in punctuation and capitalization + prediction. + step: offset of consequent segments. + Returns: + None + """ + if max_seq_length < 3: + raise ValueError( + f"Parameter `max_seq_length={max_seq_length}` cannot be less than 3 because `max_seq_length` is a length " + f"of a segment with [CLS] and [SEP] tokens." + ) + if margin >= (max_seq_length - 2) // 2 and margin > 0 or margin < 0: + raise ValueError( + f"Parameter `margin` has to be not negative and less than `(max_seq_length - 2) // 2`. Don't forget about " + f"CLS and EOS tokens in the beginning and the end of segment. margin={margin}, " + f"max_seq_length={max_seq_length}" + ) + if step <= 0: + raise ValueError(f"Parameter `step` has to be positive whereas step={step}") + if step > max_seq_length - 2 - 2 * margin: + logging.warning( + f"Parameter step={step} is too big. It will be reduced to `min(max_seq_length, <maximum query length> + 2) " + f"- 2 - 2 * margin`." + ) + + +def _get_subtokens_and_subtokens_mask(query: str, tokenizer: TokenizerSpec) -> Tuple[List[str], List[bool]]: + """ + Tokenizes input query into subtokens and creates subtokens mask. Subtokens mask is an array of the same length as + subtokens array and contains zeros and ones in which. 
If element of mask equals 1, then corresponding subtoken in + subtokens array is first subtoken in some word + Args: + query: a string that will be tokenized + tokenizer: an instance of tokenizer + Returns: + subtokens: list of subtokens + subtokens_mask: list of ints + """ + words = query.strip().split() + subtokens = [] + subtokens_mask = [] + for j, word in enumerate(words): + word_tokens = tokenizer.text_to_tokens(word) + subtokens.extend(word_tokens) + subtokens_mask.append(True) + subtokens_mask.extend([False] * (len(word_tokens) - 1)) + return subtokens, subtokens_mask + + +class BertPunctuationCapitalizationInferDataset(Dataset): + """ + Creates dataset to use during inference for punctuation and capitalization tasks with a pretrained model. + For dataset to use during training with labels, see + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset` + and + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`. + + Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments + which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization + including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift + between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near + borders of segments which have only one side context. + + Args: + queries (:obj:`List[str]`): list of sequences. + tokenizer (:obj:`TokenizerSpec`): a tokenizer which was used for model training. It should have properties + ``cls_id``, ``sep_id``, ``unk_id``, ``pad_id``. 
+ max_seq_length (:obj:`int`, `optional`, defaults to :obj:`128`): max sequence length which includes [CLS] and + [SEP] tokens + step (:obj:`int`, `optional`, defaults to :obj:`8`): relative shift of consequent segments into which long + queries are split. Long queries are split into segments which can overlap. Parameter ``step`` controls such + overlapping. Imagine that queries are tokenized into characters, ``max_seq_length=5``, and ``step=2``. In + such a case query "hello" is tokenized into segments + ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. + margin (:obj:`int`, `optional`, defaults to :obj:`16`): number of subtokens in the beginning and the end of + segments which are not used for prediction computation. The first segment does not have left margin and the + last segment does not have right margin. For example, if input sequence is tokenized into characters, + ``max_seq_length=5``, ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments + ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], + ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions + computation, margins are removed. In the next list, subtokens which logits are not used for final + predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], + ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. 
+ """ + + @property + def output_types(self) -> Optional[Dict[str, NeuralType]]: + """Returns neural types of :meth:`collate_fn` output.""" + return { + 'input_ids': NeuralType(('B', 'T'), ChannelType()), + 'segment_ids': NeuralType(('B', 'T'), ChannelType()), + 'input_mask': NeuralType(('B', 'T'), MaskType()), + 'subtokens_mask': NeuralType(('B', 'T'), MaskType()), + 'quantities_of_preceding_words': NeuralType(('B',), Index()), + 'query_ids': NeuralType(('B',), Index()), + 'is_first': NeuralType(('B',), BoolType()), + 'is_last': NeuralType(('B',), BoolType()), + } + + def __init__( + self, queries: List[str], tokenizer: TokenizerSpec, max_seq_length: int = 64, step: int = 8, margin: int = 16 + ): + features = get_features_infer( + queries=queries, max_seq_length=max_seq_length, tokenizer=tokenizer, step=step, margin=margin + ) + self.all_input_ids: List[List[int]] = features[0] + self.all_segment_ids: List[List[int]] = features[1] + self.all_input_mask: List[List[int]] = features[2] + self.all_subtokens_mask: List[List[int]] = features[3] + self.all_quantities_of_preceding_words: List[int] = features[4] + self.all_query_ids: List[int] = features[5] + self.all_is_first: List[bool] = features[6] + self.all_is_last: List[bool] = features[7] + + def __len__(self) -> int: + return len(self.all_input_ids) + + def collate_fn( + self, batch: List[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]] + ) -> Tuple[ + torch.Tensor, + torch.Tensor, + torch.Tensor, + torch.Tensor, + Tuple[int, ...], + Tuple[int, ...], + Tuple[bool, ...], + Tuple[bool, ...], + ]: + """ + Collates samples into batches. + + Args: + batch (:obj:`List[tuple]`): a list of samples returned by :meth:`__getitem__` method. + + Returns: + :obj:`Tuple[torch.Tensor (x4), Tuple[int, ...] (x2), Tuple[bool, ...] (x2)]`: a tuple containing 8 + elements: + + - ``input_ids`` (:obj:`torch.Tensor`): an integer tensor of shape ``[Batch, Time]`` containing encoded + input text. 
+ - ``segment_ids`` (:obj:`torch.Tensor`): an integer tensor of shape ``[Batch, Time]`` filled with zeros. + - ``input_mask`` (:obj:`torch.Tensor`): a boolean tensor of shape ``[Batch, Time]`` which elements are + ``True`` if corresponding token is not a padding token. + - ``subtokens_mask`` (:obj:`torch.Tensor`): a boolean tensor of shape ``[Batch, Time]`` which elements + are ``True`` if corresponding tken is the first token in a word. + - ``quantities_of_preceding_words`` (:obj:`Tuple[int, ...]`): a tuple containing number of words in + a query preceding current segment. + - ``query_ids`` (:obj:`Tuple[int, ...]`): a tuple containing indices of queries to which segments belong. + - ``is_first`` (:obj:`Tuple[bool, ...]`): a tuple booleans which elements are ``True`` if corresponding + segment is the first segment in a query. + - ``is_last`` (:obj:`Tuple[bool, ...]`): a tuple of booleans which elements are ``True`` if corresponding + segment is the last segment in a query. + """ + inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch) + return ( + pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0), + pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0), + pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0), + pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0), + n_preceding, + query_ids, + is_first, + is_last, + ) + + def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]: + """ + Returns batch used for punctuation and capitalization inference. + + Args: + idx (:obj:`int`): a batch index + + Returns: + :obj:`Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]`: a tuple containing: + + - ``input_ids`` (:obj:`np.ndarray`): an integer numpy array of shape ``[Time]``. 
Ids of word + subtokens encoded using tokenizer passed in constructor ``tokenizer`` parameter. + - ``segment_ids`` (:obj:`np.ndarray`): an integer zeros numpy array of shape ``[Time]``. Indices + of segments for BERT model (token types in HuggingFace terminology). + - ``input_mask`` (:obj:`np.ndarray`): a boolean numpy array of shape ``[Time]``. An element of + this array is ``True`` if corresponding token is not padding token. + - ``subtokens_mask`` (:obj:`np.ndarray`): a boolean numpy array of shape ``[Time]``. An element + equals ``True`` if corresponding token is the first token in a word and ``False`` otherwise. For + example, if input query ``"language processing"`` is tokenized into + ``["[CLS]", "language", "process", "ing", "SEP"]``, then ``subtokens_mask`` will be + ``[False, True, True, False, False]``. + - ``quantities_of_preceding_words`` (:obj:`int`): a number of words preceding current segment in the + query to which the segment belongs. This parameter is used for uniting predictions from adjacent + segments. + - ``query_ids`` (:obj:`int`): an index of query to which the segment belongs + - ``is_first`` (:obj:`bool`): whether a segment is the first segment in a query. The left margin of + the first segment in a query is not removed. + - ``is_last`` (:obj:`bool`): whether a query is the last query in a query. The right margin of the last + segment in a query is not removed. 
+ """ + return ( + np.array(self.all_input_ids[idx]), + np.array(self.all_segment_ids[idx]), + np.array(self.all_input_mask[idx], dtype=np.float32), + np.array(self.all_subtokens_mask[idx]), + self.all_quantities_of_preceding_words[idx], + self.all_query_ids[idx], + self.all_is_first[idx], + self.all_is_last[idx], + ) diff --git a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py new file mode 100644 --- /dev/null +++ b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py @@ -0,0 +1,1137 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import multiprocessing as mp +import os +import pickle +import re +import shutil +from collections import deque +from pathlib import Path +from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Type, Union + +import torch +import webdataset as wds +from joblib import Parallel, delayed +from numpy.typing import ArrayLike +from omegaconf import DictConfig +from torch.utils.data import IterableDataset + +from nemo.collections.common.tokenizers import TokenizerSpec +from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import ( + LABEL_ID_DIR_FOR_NEMO_CHECKPOINT, + BertPunctuationCapitalizationDataset, + Progress, + create_label_ids, + create_masks_and_segment_ids, + load_label_ids, + raise_not_equal_labels_error, +) +from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer +from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType +from nemo.utils import logging + +NUMBER_RE = "(0|[1-9][0-9]*)" +TAR_FRAGMENT_TMPL_IN_PROGRESS = "fragment{fragment_idx}.{file_idx}.tar" +TAR_FRAGMENT_TMPL_FINISHED = "fragment{fragment_idx}.num_batches{num_batches}.{file_idx}.tar" +TAR_FRAGMENT_TMPL_TO_REPACK = "fragment{fragment_idx}.num_batches{num_batches}.{file_idx}.tar.to_repack" +TAR_FRAGMENT_PATTERN_IN_PROGRESS = re.compile(f"fragment{NUMBER_RE}.{NUMBER_RE}.tar$") +TAR_FRAGMENT_PATTERN_FINISHED = re.compile(f"fragment{NUMBER_RE}.num_batches{NUMBER_RE}.{NUMBER_RE}.tar$") +TAR_FRAGMENT_PATTERN_TO_REPACK = re.compile(f"fragment{NUMBER_RE}.num_batches{NUMBER_RE}.{NUMBER_RE}.tar.to_repack$") + +DATASET_PARAMETERS_TMPL = "{prefix}.tokens{tokens_in_batch}.max_seq_length{max_seq_length}.{tokenizer}" +TAR_FINAL_TMPL = ".batches{num_batches}.{ctr}.tar" + +PROGRESS_REPORT_PERIOD = 10 ** 4 + +METADATA_PUNCT_LABEL_VOCAB_KEY = 'punct_label_vocab_file' +METADATA_CAPIT_LABEL_VOCAB_KEY = 'capit_label_vocab_file' +DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME = 'punct_label_vocab.csv' 
+DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME = 'capit_label_vocab.csv' + + +def count_lines_and_get_fragment_starting_positions( + file_name: Path, lines_per_dataset_fragment: int +) -> Tuple[int, List[int]]: + """ + Returns number of lines in a file and indices of fragment starting bytes. + + Args: + file_name: a path to a text or label file + lines_per_dataset_fragment: number of lines in a dataset fragment. The last fragment can contain less lines + + Returns: + num_lines: number of lines in a file + start_bytes: indices of fragment starting bytes + """ + pos = [0] + with file_name.open() as f: + i = 0 + line = f.readline() + while line: + i += 1 + if i % lines_per_dataset_fragment == 0: + pos.append(f.tell()) + line = f.readline() + return i, pos[:-1] if i % lines_per_dataset_fragment == 0 else pos + + +def get_fragment_start_bytes( + text_file: Path, labels_file: Path, lines_per_dataset_fragment: int +) -> Tuple[int, List[int], List[int]]: + """ + A function for calculating borders of dataset fragments. The function is used to split ``text_file`` and + ``labels_file`` for processing them in parallel. + + Args: + text_file: a path to a dataset source file + labels_file: a path to a dataset label file + lines_per_dataset_fragment: a number of lines in one fragment + + Returns: + num_lines: total number of elements in the dataset (number of lines in ``text_file``` and ``labels_file``) + text_start_bytes: indices of the first bytes of fragments in ``text_file`` + label_start_bytes: indices of the first bytes of fragments in ``labels_file`` + """ + logging.info( + f"Counting lines in files {text_file} and {labels_file} and creating segment borders. This may take " + f"considerable time. 86GB, 1.27b lines file was processed in 7 minutes." 
+ ) + result = Parallel(n_jobs=2)( + delayed(count_lines_and_get_fragment_starting_positions)(file_name, lines_per_dataset_fragment) + for file_name in [text_file, labels_file] + ) + if result[0][0] != result[1][0]: + raise ValueError( + f"Text file {text_file} and label file {labels_file} contain different number of lines. Number of lines " + f"in text file: {result[0][0]}, number of lines in label file: {result[1][0]}." + ) + num_lines = result[0][0] + text_start_bytes, label_start_bytes = result[0][1], result[1][1] + assert len(text_start_bytes) == len(label_start_bytes) + return num_lines, text_start_bytes, label_start_bytes + + +def process_fragment( + text_file: Path, + labels_file: Path, + output_dir: Path, + text_start_pos: int, + label_start_pos: int, + lines_per_dataset_fragment: int, + max_seq_length: int, + tokens_in_batch: int, + num_batches_per_tarfile: int, + tokenizer_name: str, + tokenizer_model: Optional[Path], + vocab_file: Optional[Path], + merges_file: Optional[Path], + special_tokens: Dict[str, str], + use_fast_tokenizer: Optional[bool], + pad_label: str, + punct_label_ids: Dict[str, int], + capit_label_ids: Dict[str, int], + fragment_idx: int, + tokenization_progress_queue: mp.Queue, + batch_mark_up_progress_queue: mp.Queue, + batch_building_progress_queue: mp.Queue, + writing_to_tar_progress_queue: mp.Queue, +) -> None: + tokenizer = get_tokenizer( + tokenizer_name, + tokenizer_model=None if tokenizer_model is None else str(tokenizer_model), + vocab_file=None if vocab_file is None else str(vocab_file), + merges_file=None if merges_file is None else str(merges_file), + special_tokens=special_tokens, + use_fast=use_fast_tokenizer, + ) + tmp_text = output_dir / f'tmp_text_{fragment_idx}.txt' + tmp_labels = output_dir / f'tmp_labels_{fragment_idx}.txt' + with text_file.open() as tf, labels_file.open() as lf, tmp_text.open('w') as otf, tmp_labels.open('w') as olf: + tf.seek(text_start_pos) + lf.seek(label_start_pos) + for _ in 
range(lines_per_dataset_fragment): + text_line = tf.readline() + if not text_line: + break + otf.write(text_line) + olf.write(lf.readline()) + dataset = BertPunctuationCapitalizationDataset( + tmp_text, + tmp_labels, + max_seq_length, + tokenizer, + tokens_in_batch=tokens_in_batch, + pad_label=pad_label, + punct_label_ids=punct_label_ids, + capit_label_ids=capit_label_ids, + n_jobs=0, + use_cache=False, + add_masks_and_segment_ids_to_batch=False, + verbose=False, + tokenization_progress_queue=tokenization_progress_queue, + batch_mark_up_progress_queue=batch_mark_up_progress_queue, + batch_building_progress_queue=batch_building_progress_queue, + ) + tmp_text.unlink() + tmp_labels.unlink() + tar_ctr = 0 + current_file_name = output_dir / TAR_FRAGMENT_TMPL_IN_PROGRESS.format(fragment_idx=fragment_idx, file_idx=tar_ctr) + current_num_batches = 0 + sink = wds.TarWriter(str(current_file_name)) + progress_made = 0 + for batch_i, batch in enumerate(dataset): + sink.write({"__key__": f"fragment-{fragment_idx}-batch-{batch_i}", "batch.pyd": batch}) + current_num_batches += 1 + progress_made += len(batch['input_ids']) + if current_num_batches % num_batches_per_tarfile == 0: + sink.close() + current_file_name.rename( + output_dir + / TAR_FRAGMENT_TMPL_FINISHED.format( + fragment_idx=fragment_idx, num_batches=current_num_batches, file_idx=tar_ctr + ) + ) + writing_to_tar_progress_queue.put(progress_made) + progress_made = 0 + tar_ctr += 1 + current_file_name = output_dir / TAR_FRAGMENT_TMPL_IN_PROGRESS.format( + fragment_idx=fragment_idx, file_idx=tar_ctr + ) + current_num_batches = 0 + sink = wds.TarWriter(str(current_file_name)) + sink.close() + writing_to_tar_progress_queue.put(progress_made) + if progress_made > 0: + new_file_name = output_dir / TAR_FRAGMENT_TMPL_TO_REPACK.format( + fragment_idx=fragment_idx, num_batches=current_num_batches, file_idx=tar_ctr + ) + current_file_name.rename(new_file_name) + else: + current_file_name.unlink() + if fragment_idx == 0: + 
punct_label_ids_file, capit_label_ids_file = dataset.save_labels_and_get_file_paths( + DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME, DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME + ) + punct_label_ids_file.rename(output_dir / DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME) + capit_label_ids_file.rename(output_dir / DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME) + shutil.rmtree(punct_label_ids_file.parent) + + +def remove_unexpected_files_and_dirs(output_dir: Path, output_file_tmpl: str, metadata_file_name: Path) -> None: + """ + This function removes all files with names which may be used in the dataset creation. + + Args: + output_dir: a path to directory where removal is performed + output_file_tmpl: a format string for a name of final tar file. Must include fields ``ctr`` for number of the + file and ``num_batches`` for number of batches in the file. + metadata_file_name: a metadata file name + """ + if not output_dir.is_dir(): + return + tar_final_pattern = re.compile(output_file_tmpl.format(ctr=NUMBER_RE, num_batches=NUMBER_RE)) + unexpected_tar_files = [ + path + for path in output_dir.iterdir() + if any( + [ + p.match(path.name) is not None + for p in [ + TAR_FRAGMENT_PATTERN_IN_PROGRESS, + TAR_FRAGMENT_PATTERN_FINISHED, + TAR_FRAGMENT_PATTERN_TO_REPACK, + tar_final_pattern, + ] + ] + ) + ] + if unexpected_tar_files: + logging.warning( + f"Found {len(unexpected_tar_files)} unexpected tar files in the output directory {output_dir}. " + f"All of them are going to be removed. The files match one of 3 patterns: " + f"'{TAR_FRAGMENT_PATTERN_IN_PROGRESS.pattern}', '{TAR_FRAGMENT_PATTERN_FINISHED.pattern}', " + f"'{tar_final_pattern.pattern}'. The first unexpected files: " + f"{', '.join([str(f) for f in unexpected_tar_files[:3]])}." + ) + for fn in unexpected_tar_files: + fn.unlink() + if metadata_file_name.exists(): + logging.warning(f"Found metadata file {metadata_file_name}. 
It is going to be removed.") + metadata_file_name.unlink() + punct_label_ids = output_dir / DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME + capit_label_ids = output_dir / DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME + if punct_label_ids.exists(): + logging.warning(f"Found unexpected punctuation label file {punct_label_ids}. It is going to be removed.") + punct_label_ids.unlink() + if capit_label_ids.exists(): + logging.warning(f"Found unexpected capitalization label file {capit_label_ids}. It is going to be removed.") + capit_label_ids.unlink() + + +def collect_unique_labels_from_fragment( + labels_file: Path, start_pos: int, lines_per_dataset_fragment: int, progress_queue: mp.Queue, fragment_idx: int +) -> Tuple[Set[str], Set[str]]: + """ + Returns a set of unique punctuation labels and a set of unique capitalization labels. + + Args: + labels_file: a path to a file with labels + start_pos: an index of the first byte of a fragment in ``labels_file`` + lines_per_dataset_fragment: number of lines in dataset fragment. In the last fragment there can be less lines. + progress_queue: a queue for reporting number of processed lines + fragment_idx: a processed fragment index + + Returns: + unique_punct: a set of unique punctuation labels + unique_capit: a set of unique capitalization labels + """ + unique_punct, unique_capit = set(), set() + with labels_file.open() as f: + f.seek(start_pos) + progress_report = 0 + for i in range(lines_per_dataset_fragment): + line = f.readline() + if not line: + break + pairs = line.split() + if not all([len(p) == 2 for p in pairs]): + broken_pairs = [i for i, p in enumerate(pairs) if len(p) != 2] + raise ValueError( + f"Found broken labels line in number {fragment_idx * lines_per_dataset_fragment + i} in file " + f"{labels_file}. 
Indices of broken pairs of labels: {broken_pairs}" + ) + punct, capit = zip(*pairs) + unique_punct.update(punct) + unique_capit.update(capit) + progress_report += 1 + if progress_report >= PROGRESS_REPORT_PERIOD: + progress_queue.put(progress_report) + progress_report = 0 + progress_queue.put(progress_report) + return unique_punct, unique_capit + + +def create_label_dictionaries( + labels_file: Path, + text_start_bytes: List[int], + num_lines: int, + lines_per_dataset_fragment: int, + pad_label: str, + n_jobs: int, +) -> Tuple[Dict[str, int], Dict[str, int]]: + """ + Creates punctuation and capitalization label ids dictionaries based on labels present in ``labels_file``. + + Args: + labels_file: a path to file with labels + text_start_bytes: indices of first bytes of fragments in ``labels_file`` + num_lines: total number of lines in ``labels_file`` + lines_per_dataset_fragment: number of lines in dataset fragments. The last fragment can have less lines + pad_label: a label used for padding and for absence of punctuation and capitalization + n_jobs: a number of fragments processed in parallel + + Returns: + punct_label_ids: a dictionary with punctuation label ids + capit_label_ids: a dictionary with capitalization label ids + """ + with Progress(num_lines, "Creating label dictionary", "line") as progress_queues: + result = Parallel(n_jobs=min(n_jobs, len(text_start_bytes)))( + delayed(collect_unique_labels_from_fragment)( + labels_file, start_pos, lines_per_dataset_fragment, *progress_queues, fragment_idx + ) + for fragment_idx, start_pos in enumerate(text_start_bytes) + ) + unique_punct, unique_capit = zip(*result) + unique_punct = set().union(*unique_punct) + unique_capit = set().union(*unique_capit) + return create_label_ids(unique_punct, pad_label), create_label_ids(unique_capit, pad_label) + + +def check_label_ids(pad_label: str, punct_label_ids: Dict[str, int], capit_label_ids: Dict[str, int]) -> None: + """ + A function for checking that pad label has zeroth 
id in ``punct_label_dis`` and ``capit_label_ids`` dictionaries. + Args: + pad_label: a pad label + punct_label_ids: a dictionary with punctuation label ids + capit_label_ids: a dictionary with capitalization label ids + """ + msg = "Parameter `pad_label` has to have id 0 in dictionary `{param_name}` whereas it has id {id_}." + ( + '' if len(pad_label) > 10 else f" pad_label='{pad_label}'" + ) + if punct_label_ids is not None: + if punct_label_ids[pad_label] != 0: + raise ValueError(msg.format(param_name='punct_label_ids', id_=punct_label_ids[pad_label])) + if capit_label_ids is not None: + if capit_label_ids[pad_label] != 0: + raise ValueError(msg.format(param_name='capit_label_ids', id_=capit_label_ids[pad_label])) + + +def process_error(msg: str, error_class_or_function: Union[Type[Exception], Callable[[str], Any]]) -> None: + if issubclass(error_class_or_function, Exception): + raise error_class_or_function(msg) + if callable(error_class_or_function): + error_class_or_function(msg) + raise ValueError( + f"Parameter `error_class_or_function` has to be a subclass of `Exception` or a function." + f"Given {type(error_class_or_function)}" + ) + + +def check_labels_for_being_unique_before_building_label_ids( + pad_label: str, + other_labels: List[str], + pad_label_name: str, + other_labels_name: str, + error_class_or_function: Union[Type[Exception], Callable[[str], Any]], +) -> None: + """ + A function for checking that that all labels are unique. + + Args: + pad_label: a pad label + other_labels: a list of labels except for the pad label + pad_label_name: a name of the pad label used in error message + other_labels_name: a name of other labels used in error message + error_class_or_function: a class of an exception which is raised if there is a problem with labels. + Alternatively it can be a function for handling exceptions, for example ``argparse.ArgumentParser.error``. + Such a function has to take one argument -- error message. 
+ """ + for i, lbl in enumerate(other_labels): + if lbl == pad_label: + msg = f"Label number {i} in parameter `{other_labels_name}` is equal to `{pad_label_name}`." + process_error(msg, error_class_or_function) + for i in range(len(other_labels) - 1): + for lbl in other_labels[i + 1 :]: + if lbl == other_labels[i]: + msg = f"Label number {i} occurs at least 2 times in parameter `{other_labels_name}`." + process_error(msg, error_class_or_function) + + +def build_label_ids_from_list_of_labels(pad_label: str, other_labels: List[str]) -> Dict[str, int]: + """ + Builds label ids dictionary from pad label and list of other labels. Used for parsing command line arguments. + Args: + pad_label: a pad label + other_labels: list of labels except for the pad label + + Returns: + a dictionary with label ids + """ + check_labels_for_being_unique_before_building_label_ids( + pad_label, other_labels, 'pad_label', 'other_labels', ValueError + ) + ids = {pad_label: 0} + for lbl in other_labels: + ids[lbl] = len(ids) + return ids + + +def get_label_dictionaries( + labels_file: Path, + start_bytes: List[int], + num_lines: int, + lines_per_dataset_fragment: int, + pad_label: str, + punct_label_ids: Optional[Dict[str, int]], + capit_label_ids: Optional[Dict[str, int]], + punct_label_vocab_file: Optional[Path], + capit_label_vocab_file: Optional[Path], + n_jobs: int, +) -> Tuple[Dict[str, int], Dict[str, int]]: + """ + Return label ids if the label ids are present in parameters ``punct_label_ids``, ``capit_label_ids``, + ``punct_label_vocab_file``, ``capit_label_vocab_file``. Otherwise, label ids are created using ``labels_file``. + + Args: + labels_file: a path to file with labels. Labels have to be given in the format described in + https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format + start_bytes: a list of positions in ``labels_file`` at which fragments start. 
Parameter ``start_bytes`` is used + for creating labels for several fragments in parallel + num_lines: total number of lines in ``labels_file``. Parameter ``num_lines`` is used for showing progress of + label ids collection + lines_per_dataset_fragment: number of lines in a dataset fragment + pad_label: a label used for padding and also neutral label showing there is no punctuation and capitalization. + Label ``pad_label`` has to have id ``0`` in parameters ``punct_label_ids``, ``capit_label_ids``, + ``punct_label_vocab_file``, ``capit_label_vocab_file`` if these parameters are provided. + punct_label_ids: a dictionary with punctuation label ids. Pad label has to have id ``0``. No more than 1 of + parameters ``punct_label_ids`` and ``punct_label_vocab_file`` can be provided. + capit_label_ids: a dictionary with capitalization label ids. Pad label has to have id ``0``. No more than 1 of + parameters ``capit_label_ids`` and ``capit_label_vocab_file`` can be provided. + punct_label_vocab_file: a text file with punctuation labels. Every line in the file contains 1 label. Pad label + has to be in the first line. No more than 1 of parameters ``punct_label_ids`` and + ``punct_label_vocab_file`` can be provided. + capit_label_vocab_file: a text file with capitalization labels. Every line in the file contains 1 label. Pad + label has to be in the first line. No more than 1 of parameters ``capit_label_ids`` and + ``capit_label_vocab_file`` can be provided. 
+ n_jobs: a number of fragments processed in parallel + + Returns: + punct_label_ids: a dictionary with punctuation label ids + capit_label_ids: a dictionary with capitalization label ids + """ + if punct_label_ids is not None and punct_label_vocab_file is not None: + raise ValueError("You can provide at most one of parameters `punct_label_ids` and `punct_label_vocab_file`.") + if capit_label_ids is not None and capit_label_vocab_file is not None: + raise ValueError("You can provide at most one of parameters `capit_label_ids` and `capit_label_vocab_file`.") + if punct_label_ids is None and punct_label_vocab_file is not None: + punct_label_ids = load_label_ids(punct_label_vocab_file) + if capit_label_ids is None and capit_label_vocab_file is not None: + capit_label_ids = load_label_ids(capit_label_vocab_file) + check_label_ids(pad_label, punct_label_ids, capit_label_ids) + if punct_label_ids is None or capit_label_ids is None: + _punct_label_ids, _capit_label_ids = create_label_dictionaries( + labels_file, start_bytes, num_lines, lines_per_dataset_fragment, pad_label, n_jobs + ) + if punct_label_ids is None: + punct_label_ids = _punct_label_ids + if capit_label_ids is None: + capit_label_ids = _capit_label_ids + return punct_label_ids, capit_label_ids + + +def decode_pyd(key: str, value: bytes) -> Any: + """ + Used for decoding batch loaded by ``webdataset`` from tar files. + Args: + key: name of a batch + value: pickled batch + + Returns: + decoded batch + """ + return pickle.loads(value) + + +def repack_tar_files_with_not_enough_batches(output_dir: Path, num_batches_per_tarfile: int) -> None: + f""" + It is possible that number of batches in a fragment is not evenly divisible by ``num_batches_per_tarfile``. + In such a case excess batches are put in a tar file which matches a pattern + ``fragment(0|[1-9][0-9]*).num_batches(0|[1-9][0-9]*).(0|[1-9][0-9]*).tar.to_repack``. 
Such files are repacked by + ``repack_tar_files_with_not_enough_batches`` function into tar files with correct ``num_batches_per_tarfile`` + batches each. If there is no enough batches in repacked files, then up to ``num_batches_per_tarfile - 1`` + remaining batches may be discarded. + + Args: + output_dir: a path to the output directory which contains files to repack and where new files are saved + num_batches_per_tarfile: a number of batches in 1 tar file. If number of batches in files matching a pattern + ``fragment(0|[1-9][0-9]*).num_batches(0|[1-9][0-9]*).(0|[1-9][0-9]*).tar.to_repack`` is not evenly + divisible by ``num_batches_per_tarfile`` excess batches are discarded. + """ + files_to_repack_with_matches = [ + (path, TAR_FRAGMENT_PATTERN_TO_REPACK.match(path.name)) + for path in output_dir.iterdir() + if TAR_FRAGMENT_PATTERN_TO_REPACK.match(path.name) is not None + ] + files_to_repack_with_matches = sorted(files_to_repack_with_matches, key=lambda x: int(x[1].group(3))) + logging.info(f"Found {len(files_to_repack_with_matches)} files for repacking.") + files_to_repack_with_matches = deque(files_to_repack_with_matches) + total_batches_in_repacked_files = 0 + initial_number_of_files_to_repack = len(files_to_repack_with_matches) + pop_file_ds = None + new_file_sink = None + new_file_num_batches = 0 + while files_to_repack_with_matches: + assert pop_file_ds is None or new_file_sink is None + if new_file_sink is None: + # `append_file` is a file which content will serve as a start for new tar file. `append_file` content is + # copied into a `new_file` and then content of other files needing repacking is appended to content of + # `new_file`. 
+ append_file, match = files_to_repack_with_matches.popleft() + new_file = append_file.parent / TAR_FRAGMENT_TMPL_FINISHED.format( + fragment_idx=match.group(1), num_batches=num_batches_per_tarfile, file_idx=match.group(3) + ) + new_file_sink = wds.TarWriter(str(new_file)) + append_ds_to_rewrite = ( + wds.WebDataset(urls=[str(append_file)], nodesplitter=None) + .decode(wds.handle_extension('.pyd', decode_pyd)) + .to_tuple('__key__', 'batch.pyd') + ) + for key, batch in iter(append_ds_to_rewrite): + new_file_sink.write({"__key__": key, "batch.pyd": batch}) + new_file_num_batches += 1 + total_batches_in_repacked_files += 1 + assert total_batches_in_repacked_files < initial_number_of_files_to_repack * num_batches_per_tarfile + assert new_file_num_batches == int(match.group(2)), ( + f"Number of batches {new_file_num_batches} in {append_file} is different from number of batches " + f"{match.group(2)} in repacked tar file with name {append_file}." + ) + append_file.unlink() + if files_to_repack_with_matches and pop_file_ds is None: + pop_file, _ = files_to_repack_with_matches.pop() + pop_file_ds = ( + wds.WebDataset(urls=[str(pop_file)], nodesplitter=None) + .decode(wds.handle_extension('.pyd', decode_pyd)) + .to_tuple('__key__', 'batch.pyd') + ) + pop_file_ds = iter(pop_file_ds) + if pop_file_ds is not None and new_file_sink is not None: + while new_file_num_batches < num_batches_per_tarfile: + try: + key, batch = next(pop_file_ds) + except StopIteration: + pop_file_ds = None + pop_file.unlink() + break + new_file_sink.write({"__key__": key, "batch.pyd": batch}) + total_batches_in_repacked_files += 1 + assert total_batches_in_repacked_files < initial_number_of_files_to_repack * num_batches_per_tarfile + new_file_num_batches += 1 + if new_file_num_batches >= num_batches_per_tarfile: + assert new_file_num_batches == num_batches_per_tarfile + new_file_sink.close() + new_file_sink = None + new_file_num_batches = 0 + if new_file_sink is not None: + new_file_sink.close() + 
new_file.unlink() + logging.info(f"Discarded {new_file_num_batches} batches.") + if pop_file_ds is not None: + pop_file.unlink() + logging.info(f"Repacked {total_batches_in_repacked_files} batches from short tar files") + + +def create_metadata_file( + output_dir: Path, output_file_tmpl: str, metadata_file_name: Path, num_batches_per_tarfile: int +) -> None: + """ + Rename tar files according to template ``output_file_tmpl`` and save metadata file. + Args: + output_dir: a path to directory which contains initial tar files and where renamed tar files are saved + output_file_tmpl: a template of a new tar file name + metadata_file_name: a path to a file into which metadata is going to be saved + num_batches_per_tarfile: a required number of batches in tar files. Used for checking that present tar files + have correct number of batches + """ + metadata = {"num_batches": 0, "tar_files": []} + for i, fn in enumerate([fn for fn in output_dir.iterdir() if TAR_FRAGMENT_PATTERN_FINISHED.match(fn.name)]): + nb = int(TAR_FRAGMENT_PATTERN_FINISHED.match(fn.name).group(2)) + assert nb == num_batches_per_tarfile + new_name = output_dir / output_file_tmpl.format(ctr=i, num_batches=nb) + fn.rename(new_name) + metadata['tar_files'].append(new_name.name) + metadata["num_batches"] += nb + metadata[METADATA_PUNCT_LABEL_VOCAB_KEY] = DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME + metadata[METADATA_CAPIT_LABEL_VOCAB_KEY] = DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME + logging.info(f"{metadata['num_batches']} batches are in tarred dataset with metadata file {metadata_file_name}") + with metadata_file_name.open('w') as f: + json.dump(metadata, f, indent=2) + + +def create_tarred_dataset( + text_file: Union[os.PathLike, str], + labels_file: Union[os.PathLike, str], + output_dir: Union[os.PathLike, str], + max_seq_length: int, + tokens_in_batch: int, + lines_per_dataset_fragment: int, + num_batches_per_tarfile: int, + tokenizer_name: str, + tokenizer_model: Optional[Union[os.PathLike, str]] = None, + 
vocab_file: Optional[Union[os.PathLike, str]] = None, + merges_file: Optional[Union[os.PathLike, str]] = None, + special_tokens: Optional[Dict[str, str]] = None, + use_fast_tokenizer: Optional[bool] = False, + pad_label: str = 'O', + punct_label_ids: Optional[Dict[str, int]] = None, + capit_label_ids: Optional[Dict[str, int]] = None, + punct_label_vocab_file: Optional[Union[os.PathLike, str]] = None, + capit_label_vocab_file: Optional[Union[os.PathLike, str]] = None, + tar_file_prefix: Optional[str] = 'punctuation_capitalization', + n_jobs: Optional[int] = None, +) -> None: + """ + Creates tarred dataset from ``text_file`` and ``labels_file``. A tarred dataset allows to train on large amounts of + data without storing it all into memory simultaneously. You may use these function directly or try script + `examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_. + + Tarred dataset is a directory which contains metadata file, tar files with batches, + ``punct_label_vocab.csv`` and ``capit_label_vocab.csv`` files. + + Metadata file is a JSON file with 4 items: ``'num_batches'``, ``'tar_files'``, ``'punct_label_vocab_file'``, + ``'capit_label_vocab_file'``. The item ``'num_batches'`` (``int``) is a total number of batches in tarred dataset. + ``'tar_files'`` is a list of paths to tar files relative to directory containing the metadata file. The items + ``'punct_label_vocab_file'`` and ``'capit_label_vocab_file'`` are correspondingly paths to punctuation and + capitalization label vocabulary files. These paths are relative to directory containing the metadata file. + + Every tar file contains objects written using ``webdataset.TarWriter``. Each object is a dictionary with two items: + ``'__key__'`` and ``'batch.pyd'``. 
``'__key__'`` is a name of a batch and ``'batch.pyd'`` is a pickled dictionary + which contains ``'input_ids'``, ``'subtokens_mask'``, ``'punct_labels'``, ``'capit_labels'``. ``'input_ids'`` is an + array containing ids of source tokens, ``'subtokens_mask'`` is a boolean array showing first tokens in words, + ``'punct_labels'`` and ``'capit_labels'`` are arrays with ids of labels. + + Metadata file should be passed to constructor of :class:`BertPunctuationCapitalizationTarredDataset` and the + instance of the class will handle iteration and constructing masks and token types for BERT model. + + Args: + text_file (:obj:`Union[os.PathLike, str]`): a path to a file with dataset source. Dataset source is lowercased + text without punctuation. Number of lines in ``text_file`` has to be equal to the number of lines in + ``labels_file``. + labels_file (:obj:`Union[os.PathLike, str]`): a path to a file with labels. Labels are given in the format + described in :ref:`NeMo Data Format<nemo-data-format-label>`. + output_dir (:obj:`Union[os.PathLike, str]`): a path to a directory where metadata file, tar files and + ``'punct_label_ids.csv'`` and ``'capit_label_ids.csv'`` files are saved. + max_seq_length (:obj:`int`): Maximum number of subtokens in an input sequence. A source sequence which contains + too many subtokens is clipped to ``max_seq_length - 2`` subtokens and then [CLS] token is prepended to the + clipped sequence and [SEP] token is appended to the clipped sequence. The clipping is performed via removal + of subtokens in the end of a source sequence. + tokens_in_batch (:obj:`int`): maximum number of tokens in a batch including [CLS], [SEP], [UNK], and [PAD] + tokens. Before packing into batches source sequences are sorted by number of tokens in order to reduce + number of pad tokens. So the number of samples in a batch may vary. + lines_per_dataset_fragment (:obj:`int`): a number of lines processed by one worker during creation of tarred + dataset. 
A worker tokenizes ``lines_per_dataset_fragment`` lines and keeps in RAM tokenized text labels + before packing them into batches. Reducing ``lines_per_dataset_fragment`` leads to reducing of the amount + of memory used by this function. + num_batches_per_tarfile (:obj:`int`): a number of batches saved in a tar file. If you increase + ``num_batches_per_tarfile``, then there will be less tar files in the dataset. There cannot be less then + ``num_batches_per_tarfile`` batches in a tar file, and all excess batches are removed. Maximum number of + discarded batches is ``num_batches_per_tarfile - 1``. + tokenizer_name (:obj:`str`): a name of the tokenizer used for tokenization of source sequences. Possible + options are ``'sentencepiece'``, ``'word'``, ``'char'``, HuggingFace tokenizers. For more options see + function ``nemo.collections.nlp.modules.common.get_tokenizer``. The tokenizer must have properties + ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``. + tokenizer_model (:obj:`Union[os.PathLike, str]`, `optional`): a path to a tokenizer model required for + ``'sentencepiece'`` tokenizer. + vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to a vocabulary file which can be used in + ``'word'``, ``'char'``, and HuggingFace tokenizers. + merges_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to merges file which can be used in + HuggingFace tokenizers. + special_tokens (:obj:`Dict[str, str]`, `optional`): a dictionary with special tokens passed to constructors of + ``'char'``, ``'word'``, ``'sentencepiece'``, and various HuggingFace tokenizers. + use_fast_tokenizer (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to use fast HuggingFace + tokenizer. + pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): a pad label both for punctuation and + capitalization. This label is also a neutral label (used for marking words which do not need punctuation + and capitalization). 
+ punct_label_ids (:obj:`Dict[str, int]`, `optional`): a dictionary which keys are punctuation labels and values + are label ids. The pad label ``pad_label`` has to have id ``0``. You can provide at most one of parameters + ``punct_label_ids`` and ``punct_label_vocab_file``. If none of parameters ``punct_label_ids`` and + ``punct_label_vocab_file`` is provided, then punctuation label ids will be inferred from ``labels_file`` + file. + capit_label_ids (:obj:`Dict[str, int]`, `optional`): same as ``punct_label_ids`` for capitalization labels. + punct_label_vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to a file with punctuation labels. + These labels include pad label. The pad label has to be the first label in the file. Each label is written + on a separate line. Alternatively you can use ``punct_labels_ids`` parameter. If none of parameters + ``punct_labels_ids`` and ``punct_label_vocab_file`` is provided, then punctuation label ids will be + inferred from ``labels_file`` file. + capit_label_vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): same as ``punct_label_vocab_file`` for + capitalization labels. + tar_file_prefix (:obj:`str`, `optional`, defaults :obj:`'punctuation_capitalization'`): a string from which tar + file names start. + n_jobs (:obj:`int`, `optional`): a number of workers for creating tarred dataset. If ``None``, then ``n_jobs`` + is equal to number of CPUs. + """ + if n_jobs is None: + n_jobs = mp.cpu_count() + text_file, labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser() + output_dir = Path(output_dir).expanduser() + ds_params_str = DATASET_PARAMETERS_TMPL.format( + prefix=tar_file_prefix, + tokens_in_batch=tokens_in_batch, + max_seq_length=max_seq_length, + tokenizer=tokenizer_name, + ) + output_file_tmpl = ds_params_str + TAR_FINAL_TMPL + metadata_file_name = output_dir / ('metadata.' 
+ ds_params_str + '.json') + remove_unexpected_files_and_dirs(output_dir, output_file_tmpl, metadata_file_name) + num_lines, text_start_bytes, label_start_bytes = get_fragment_start_bytes( + text_file, labels_file, lines_per_dataset_fragment + ) + if text_start_bytes: + output_dir.mkdir(parents=True, exist_ok=True) + else: + raise ValueError(f"Both {labels_file} and {text_file} are empty. Tarred dataset cannot be created.") + punct_label_ids, capit_label_ids = get_label_dictionaries( + labels_file, + label_start_bytes, + num_lines, + lines_per_dataset_fragment, + pad_label, + punct_label_ids, + capit_label_ids, + punct_label_vocab_file, + capit_label_vocab_file, + n_jobs, + ) + + with Progress( + num_lines, ["Tokenization", "Batch mark up", "Batch building", "Writing tarred dataset"], "query" + ) as progress_queues: + Parallel(n_jobs=min(n_jobs, len(text_start_bytes)))( + delayed(process_fragment)( + text_file, + labels_file, + output_dir, + text_start_pos, + label_start_pos, + lines_per_dataset_fragment, + max_seq_length, + tokens_in_batch, + num_batches_per_tarfile, + tokenizer_name, + None if tokenizer_model is None else Path(tokenizer_model).expanduser(), + None if vocab_file is None else Path(vocab_file).expanduser(), + None if merges_file is None else Path(merges_file).expanduser(), + special_tokens, + use_fast_tokenizer, + pad_label, + punct_label_ids, + capit_label_ids, + fragment_idx, + *progress_queues, + ) + for fragment_idx, (text_start_pos, label_start_pos) in enumerate(zip(text_start_bytes, label_start_bytes)) + ) + repack_tar_files_with_not_enough_batches(output_dir, num_batches_per_tarfile) + create_metadata_file(output_dir, output_file_tmpl, metadata_file_name, num_batches_per_tarfile) + + +class BertPunctuationCapitalizationTarredDataset(IterableDataset): + """ + Punctuation capitalization dataset which allows not to load all data in memory simultaneously. 
A tarred dataset + is created from text and label files using script + `examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_ + or function + :func:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.create_tarred_dataset`. + + Args: + metadata_file (:obj:`Union[os.PathLike, str]`): a path to tarred dataset metadata file. Metadata file and files + referenced in metadata file are created by + `examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_. + Metadata file is a JSON file which contains ``'num_batches'``, ``'tar_files'``, + ``'punct_label_vocab_file'``, ``'capit_label_vocab_file'`` items. The first item is total number of batches + in a dataset, the second is a list of paths to tar files relative to directory containing + ``metadata_file``. Items ``'punct_label_vocab_file'`` and ``'capit_label_vocab_file'`` are paths to + ``.csv`` files which contain unique punctuation an capitalization label vocabularies. Vocabulary file paths + are relative to directory containing the ``metadata_file``. Each line in ``'punct_label_vocab_file'`` and + ``'capit_label_vocab_file'`` contains 1 label. The first lines in ``'punct_label_vocab_file'`` and + ``'capit_label_vocab_file'`` files are neutral labels which also serve as pad labels. Neutral labels for + punctuation and capitalization must be equal to the ``pad_label`` parameter. + tokenizer (:obj:`TokenizerSpec`): a tokenizer instance used for tokenization of dataset source. A tokenizer + instance is used for getting ids of [CLS], [PAD], and [SEP] tokens which are used for masks creation. 
+ pad_label (:obj:`str`): a label that is used for padding and for absence of punctuation or + capitalization. Used for checking items ``'punct_label_vocab'`` and ``'capit_label_vocab'`` of dictionary + in ``metadata_file``. + label_info_save_dir (:obj:`Union[os.PathLike, str]`, `optional`): a path to a directory where label + vocabularies are copied when method :meth:`save_labels_and_get_file_paths` is called. This parameter is + useful if tarred dataset directory is read-only. + ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to use only first token in a + word for loss computation and training. If set to ``True``, then loss will be computed only for the first + tokens of words. + ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to compute loss for [CLS] and + [SEP] tokens. If set to ``True``, then loss will not be computed for [CLS] and [SEP] tokens. + world_size (:obj:`int`, `optional`, defaults to :obj:`1`): a number of processes used for model training. It is + used together with a ``global_rank`` parameter to decide which tar files will be used in the current + process. + global_rank (:obj:`int`, `optional`, defaults to :obj:`0`): a number of current process in the pool of workers + used for model training. It is used together with ``world_size`` parameter to decide which tar files will + be used in the current process. + shuffle_n (:obj:`int`, `optional`, defaults to :obj:`1`): a number of shuffled batches in a buffer. + ``shuffle_n`` batches are loaded into memory, shuffled, and then yielded by a dataset instance. 
+ """ + + @property + def output_types(self) -> Optional[Dict[str, NeuralType]]: + """Returns neural types of batches yielded by this dataset.""" + return { + 'input_ids': NeuralType(('B', 'T'), ChannelType()), + 'segment_ids': NeuralType(('B', 'T'), ChannelType()), + 'input_mask': NeuralType(('B', 'T'), MaskType()), + 'subtokens_mask': NeuralType(('B', 'T'), MaskType()), + 'loss_mask': NeuralType(('B', 'T'), MaskType()), + 'punct_labels': NeuralType(('B', 'T'), LabelsType()), + 'capit_labels': NeuralType(('B', 'T'), LabelsType()), + } + + def __init__( + self, + metadata_file: Union[os.PathLike, str], + tokenizer: TokenizerSpec, + pad_label: str, + label_info_save_dir: Optional[Union[os.PathLike, str]] = None, + ignore_extra_tokens: bool = False, + ignore_start_end: bool = True, + world_size: int = 1, + global_rank: int = 0, + shuffle_n: int = 1, + ) -> None: + super().__init__() + self.tokenizer = tokenizer + self.metadata_file = Path(metadata_file).expanduser() + if label_info_save_dir is None: + self.for_nemo_ckpt = self.metadata_file.parent / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT + else: + self.for_nemo_ckpt = Path(label_info_save_dir).expanduser() / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT + with open(self.metadata_file) as f: + self.metadata = json.load(f) + self.ignore_extra_tokens = ignore_extra_tokens + self.ignore_start_end = ignore_start_end + self.tar_files = [] + for file_path in self.metadata['tar_files']: + file_path = Path(file_path).expanduser() + if file_path.is_absolute(): + self.tar_files.append(str(file_path)) + else: + self.tar_files.append(str(self.metadata_file.parent / file_path)) + self.punct_label_vocab_file = self.metadata_file.parent / self.metadata[METADATA_PUNCT_LABEL_VOCAB_KEY] + self.capit_label_vocab_file = self.metadata_file.parent / self.metadata[METADATA_CAPIT_LABEL_VOCAB_KEY] + self.punct_label_ids = load_label_ids(self.punct_label_vocab_file) + self.capit_label_ids = load_label_ids(self.capit_label_vocab_file) + self.pad_label = pad_label 
+ self._check_pad_label() + begin_idx = (len(self.tar_files) // world_size) * global_rank + end_idx = begin_idx + (len(self.tar_files) // world_size) + logging.info( + "Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx + ) + self.tar_files = self.tar_files[begin_idx:end_idx] + self.length = self.metadata['num_batches'] // world_size + self._dataset = wds.WebDataset(urls=self.tar_files, nodesplitter=None).decode( + wds.handle_extension('.pyd', decode_pyd) + ) + if shuffle_n > 0: + self._dataset.shuffle(shuffle_n) + else: + logging.info("WebDataset will not shuffle files within the tar files.") + self._dataset = self._dataset.to_tuple('__key__', 'batch.pyd').map(f=self._build_sample) + + def _check_pad_label(self) -> None: + """ + Checks the condition that ``pad_label`` passed to this class constructor has ``0`` id in + ``self.punct_label_ids`` and ``self.capit_label_ids`` loaded from tarred dataset. + """ + for label_ids, labels_file, task in [ + (self.punct_label_ids, self.metadata[METADATA_PUNCT_LABEL_VOCAB_KEY], "punctuation"), + (self.capit_label_ids, self.metadata[METADATA_CAPIT_LABEL_VOCAB_KEY], "capitalization"), + ]: + if label_ids[self.pad_label] != 0: + raise ValueError( + f"Pad label '{self.pad_label}' has non zero id {label_ids[self.pad_label]} in {task} " + f"ids dictionary loaded from {labels_file}." + ) + + def check_for_label_consistency_with_model_config( + self, + punct_label_ids: Optional[Dict[str, int]], + capit_label_ids: Optional[Dict[str, int]], + class_labels: DictConfig, + common_dataset_parameters_config: DictConfig, + ) -> None: + """ + Checks that label ids loaded from tarred dataset are identical to those provided in + ``model.common_dataset_parameters`` :ref:`config<common-dataset-parameters-config-label>` item. 
In addition, + this method checks that label ids set in attributes ``punct_label_ids`` and ``capit_label_ids`` of an instance + of + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel` + are identical to label ids loaded from tarred dataset. + + Args: + punct_label_ids: a content of ``punct_label_ids`` attribute of an instance of + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel` + in which this tarred dataset is used. + capit_label_ids: a content of ``capit_label_ids`` attribute of an instance of + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel` + in which this tarred dataset is used. + class_labels: a config item ``model.class_labels``. See more in description of + :ref:`class labels config<class-labels-config-label>`. + common_dataset_parameters_config: a config item ``model.common_dataset_parameters``. See more in + of :ref:`common dataset parameters config<common-dataset-parameters-config-label>`. 
+ """ + tarred_dataset_label_desc_tmpl = ( + f'{{label_type}} labels loaded from tarred dataset with metadata file {self.metadata_file}' + ) + if punct_label_ids is not None: + if punct_label_ids != self.punct_label_ids: + raise_not_equal_labels_error( + first_labels=self.punct_label_ids, + second_labels=punct_label_ids, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'), + second_labels_desc="Punctuation labels stored in an attribute " + "`PunctuationCapitalizationModel.punct_label_ids`", + ) + if capit_label_ids is not None: + if capit_label_ids != self.capit_label_ids: + raise_not_equal_labels_error( + first_labels=self.capit_label_ids, + second_labels=capit_label_ids, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'), + second_labels_desc="Capitalization labels stored in an attribute" + "`PunctuationCapitalizationModel.capit_label_ids`", + ) + if common_dataset_parameters_config.punct_label_ids is not None: + cfg_punct_label_ids = dict(common_dataset_parameters_config.punct_label_ids) + if cfg_punct_label_ids != self.punct_label_ids: + raise_not_equal_labels_error( + first_labels=self.punct_label_ids, + second_labels=cfg_punct_label_ids, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'), + second_labels_desc='Punctuation labels stored a config field ' + '`model.common_dataset_parameters.punct_label_ids`', + ) + if common_dataset_parameters_config.capit_label_ids is not None: + cfg_capit_label_ids = dict(common_dataset_parameters_config.capit_label_ids) + if cfg_capit_label_ids != self.capit_label_ids: + raise_not_equal_labels_error( + first_labels=self.capit_label_ids, + second_labels=cfg_capit_label_ids, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'), + second_labels_desc='Capitalization labels stored a config field ' + '`model.common_dataset_parameters.capit_label_ids`', + ) + if 
common_dataset_parameters_config.label_vocab_dir is not None: + label_vocab_dir = Path(common_dataset_parameters_config.label_vocab_dir).expanduser() + punct_label_vocab_file = label_vocab_dir / class_labels.punct_labels_file + file_punct_vocab = load_label_ids(punct_label_vocab_file) + if file_punct_vocab != self.punct_label_ids: + raise_not_equal_labels_error( + first_labels=self.punct_label_ids, + second_labels=file_punct_vocab, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'), + second_labels_desc=f'labels stored in file {punct_label_vocab_file} passed in ' + f'`model.common_dataset_parameters.punct_label_vocab_file`', + ) + capit_label_vocab_file = label_vocab_dir / class_labels.capit_labels_file + file_capit_vocab = load_label_ids(capit_label_vocab_file) + if file_capit_vocab != self.capit_label_ids: + raise_not_equal_labels_error( + first_labels=self.capit_label_ids, + second_labels=file_capit_vocab, + first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'), + second_labels_desc=f'labels stored in file {capit_label_vocab_file} passed in ' + f'`model.common_dataset_parameters.capit_label_vocab_file`', + ) + + def save_labels_and_get_file_paths( + self, punct_labels_file_name: str, capit_labels_file_name: str + ) -> Tuple[Path, Path]: + """ + Copies label vocabulary files for punctuation and capitalization into directory passed in the constructor + parameter ``label_info_save_dir``. The names of new + files are ``punct_labels_file_name`` and ``capit_labels_file_name``. + + The signatures of this method and the signature of the method + :meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationDataset.save_labels_and_get_file_paths` + must be identical. 
+ + Args: + punct_labels_file_name (:obj:`str`): a name of punctuation labels file + capit_labels_file_name (:obj:`str`): a name of capitalization labels file + + Returns: + :obj:`Tuple[Path, Path]`: a tuple of 2 elements + + - :obj:`pathlib.Path`: a path to the new punctuation label ids file + - :obj:`pathlib.Path`: a path to the new capitalization label ids file + """ + self.for_nemo_ckpt.mkdir(parents=True, exist_ok=True) + punct_label_ids_file = self.for_nemo_ckpt / punct_labels_file_name + capit_label_ids_file = self.for_nemo_ckpt / capit_labels_file_name + shutil.copy(str(self.punct_label_vocab_file), str(punct_label_ids_file)) + shutil.copy(str(self.capit_label_vocab_file), str(capit_label_ids_file)) + return punct_label_ids_file, capit_label_ids_file + + def _build_sample(self, batch: Tuple[str, Dict[str, ArrayLike]]) -> Dict[str, ArrayLike]: + """ + Takes batch loaded from tarred dataset and transforms it for passing to the model. Adds ``'segment_ids'``, + ``'input_mask'``, ``'loss_mask'`` items to the batch. + + Args: + batch: a tuple of 2 elements: batch name and a dictionary with ``'input_ids'``, ``'subtokens_mask'``, + ``'punct_labels'``, ``'capit_labels'``. Batch name is not needed for training and inference and + discarded. + + Returns: + a batch in the form of a dictionary with items: + - ``'input_ids'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``; + - ``'subtokens_mask'``: a boolean numpy array of shape ``[Batch, Time]``; + - ``'punct_labels'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``; + - ``'capit_labels'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``; + - ``'segment_ids'``: a ``np.int8`` numpy array of shape ``[Batch, Time]``; + - ``'input_mask'``: a boolean numpy array of shape ``[Batch, Time]``; + - ``'loss_mask'``: a boolean numpy array of shape ``[Batch, Time]``. 
+ """ + _, batch = batch + batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids( + batch['input_ids'], + batch['subtokens_mask'], + self.tokenizer.pad_id, + self.tokenizer.cls_id, + self.tokenizer.sep_id, + self.ignore_start_end, + self.ignore_extra_tokens, + ) + batch['segment_ids'] = batch_segment_ids + batch['input_mask'] = batch_input_mask + batch['loss_mask'] = batch_loss_mask + return batch + + def __iter__(self) -> Iterator[Dict[str, ArrayLike]]: + """ + Constructs an iterator of batches. The values of one batch dictionary are numpy arrays of identical shapes + ``[Batch, Time]``. + + Returns: + :obj:`Iterator[Dict[str, ArrayLike]]`: an iterator of batches with items: + + - ``'input_ids'``: ``np.int32`` array containing encoded tokens, + - ``'subtokens_mask'``: ``bool`` array which elements are ``True`` if they correspond to first token in + a word, + - ``'punct_labels'``: ``np.int32`` array with encoded punctuation labels, + - ``'capit_labels'``: ``np.int32`` array with encoded capitalization labels, + - ``'segment_ids'``: ``np.int8`` array filled with zeros (BERT token types in HuggingFace terminology), + - ``'input_mask'``: ``bool`` array which elements are ``True`` if corresponding token is not a padding + token, + - ``'loss_mask'``: ``bool`` array which elements are ``True`` if loss is computed for corresponding + token. See more in description of constructor parameters ``ignore_start_end``, ``ignore_extra_tokens``. + """ + return self._dataset.__iter__() + + def __len__(self) -> int: + return self.length + + @staticmethod + def collate_fn(batches: List[Dict[str, ArrayLike]]) -> Dict[str, torch.Tensor]: + """ + Return zeroth batch of ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``, + ``'capit_labels'`` to types supported by + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`. 
+ All output tensors have shape ``[Batch, Time]``. + + .. warning:: + ``batch size`` parameter of a PyTorch data loader and sampler has to be ``1``. + + Args: + batches (:obj:`List[Dict[str, ArrayLike]]`): a list of batches passed for collating + + Returns: + :obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch + items see method :meth:`__getitem__`): + + - ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor, + - ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor, + - ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor, + - ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor, + - ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor, + - ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor, + - ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor. + """ + batch = {k: torch.as_tensor(v) for k, v in batches[0].items()} + batch['segment_ids'] = batch['segment_ids'].int() + batch['punct_labels'] = batch['punct_labels'].long() + batch['capit_labels'] = batch['capit_labels'].long() + return batch diff --git a/nemo/collections/nlp/models/nlp_model.py b/nemo/collections/nlp/models/nlp_model.py --- a/nemo/collections/nlp/models/nlp_model.py +++ b/nemo/collections/nlp/models/nlp_model.py @@ -65,7 +65,8 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): def register_artifact( self, config_path: str, src: str, verify_src_exists: bool = False, ): - """ Overrides ModelPT register_artifact default behavior. NLP models usually need artifacts that are optional.""" + """ Overrides ModelPT register_artifact default behavior. 
+ NLP models usually need artifacts that are optional.""" return super().register_artifact(config_path, src, verify_src_exists=verify_src_exists) @rank_zero_only diff --git a/nemo/collections/nlp/models/token_classification/__init__.py b/nemo/collections/nlp/models/token_classification/__init__.py --- a/nemo/collections/nlp/models/token_classification/__init__.py +++ b/nemo/collections/nlp/models/token_classification/__init__.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. - +from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import ( + PunctuationCapitalizationModelConfig, +) from nemo.collections.nlp.models.token_classification.punctuation_capitalization_model import ( PunctuationCapitalizationModel, ) diff --git a/nemo/collections/nlp/models/token_classification/punctuation_capitalization_config.py b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_config.py new file mode 100644 --- /dev/null +++ b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_config.py @@ -0,0 +1,325 @@ +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dataclasses import dataclass +from typing import Any, Dict, Optional + +from omegaconf.omegaconf import MISSING, DictConfig, OmegaConf + +from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import ( + PunctuationCapitalizationEvalDataConfig, + PunctuationCapitalizationTrainDataConfig, + legacy_data_config_to_new_data_config, +) +from nemo.core.config import TrainerConfig +from nemo.core.config.modelPT import NemoConfig +from nemo.utils.exp_manager import ExpManagerConfig + + +@dataclass +class TokenizerConfig: + """A structure and default values of source text tokenizer.""" + + vocab_file: Optional[str] = None + """A path to vocabulary file which is used in ``'word'``, ``'char'``, and HuggingFace tokenizers""" + + tokenizer_name: str = MISSING + """A name of the tokenizer used for tokenization of source sequences. Possible options are ``'sentencepiece'``, + ``'word'``, ``'char'``, HuggingFace tokenizers (e.g. ``'bert-base-uncased'``). For more options see function + ``nemo.collections.nlp.modules.common.get_tokenizer``. The tokenizer must have properties ``cls_id``, ``pad_id``, + ``sep_id``, ``unk_id``.""" + + special_tokens: Optional[Dict[str, str]] = None + """A dictionary with special tokens passed to constructors of ``'char'``, ``'word'``, ``'sentencepiece'``, and + various HuggingFace tokenizers.""" + + tokenizer_model: Optional[str] = None + """A path to a tokenizer model required for ``'sentencepiece'`` tokenizer.""" + + +@dataclass +class LanguageModelConfig: + """ + A structure and default values of language model configuration of punctuation and capitalization model. BERT like + HuggingFace models are supported. Provide a valid ``pretrained_model_name`` and, optionally, you may + reinitialize model via ``config_file`` or ``config``. + + Alternatively you can initialize the language model using ``lm_checkpoint``. + + This config is a part of :class:`PunctuationCapitalizationModelConfig` config. 
+ """ + + pretrained_model_name: str = MISSING + """A mandatory parameter containing name of HuggingFace pretrained model. For example, ``'bert-base-uncased'``.""" + + config_file: Optional[str] = None + """A path to a file with HuggingFace model config which is used to reinitialize language model.""" + + config: Optional[Dict] = None + """A HuggingFace config which is used to reinitialize language model.""" + + lm_checkpoint: Optional[str] = None + """A path to a ``torch`` checkpoint of a language model.""" + + +@dataclass +class HeadConfig: + """ + A structure and default values of configuration of capitalization or punctuation model head. This config defines a + multilayer perceptron which is applied to outputs of a language model. Number of units in the hidden layer is equal + to the dimension of the language model. + + This config is a part of :class:`PunctuationCapitalizationModelConfig` config. + """ + + num_fc_layers: int = 1 + """A number of hidden layers in a multilayer perceptron.""" + + fc_dropout: float = 0.1 + """A dropout used in an MLP.""" + + activation: str = 'relu' + """An activation used in hidden layers.""" + + use_transformer_init: bool = True + """Whether to initialize the weights of the classifier head with the approach that was used for language model + initialization.""" + + +@dataclass +class ClassLabelsConfig: + """ + A structure and default values of a mandatory part of config which contains names of files which are saved in .nemo + checkpoint. These files can also be used for passing label vocabulary to the model. For using them as label + vocabularies you will need to provide path these files in parameter + ``model.common_dataset_parameters.label_vocab_dir``. Each line in labels files + contains 1 label. The values are sorted, ``<line number>==<label id>``, starting from ``0``. A label with ``0`` id + must contain neutral label which must be equal to ``model.common_dataset_parameters.pad_label``. 
+ + This config is a part of :class:`~CommonDatasetParametersConfig`. + """ + + punct_labels_file: str = MISSING + """A name of punctuation labels file.""" + + capit_labels_file: str = MISSING + """A name of capitalization labels file.""" + + +@dataclass +class CommonDatasetParametersConfig: + """ + A structure and default values of common dataset parameters config which includes label and loss mask information. + If you omit parameters ``punct_label_ids``, ``capit_label_ids``, ``label_vocab_dir``, then labels will be inferred + from a training dataset or loaded from a checkpoint. + + Parameters ``ignore_extra_tokens`` and ``ignore_start_end`` are responsible for forming loss mask. A loss mask + defines on which tokens loss is computed. + + This parameter is a part of config :class:`~PunctuationCapitalizationModelConfig`. + """ + + pad_label: str = MISSING + """A mandatory parameter which should contain label used for punctuation and capitalization label padding. It + also serves as a neutral label for both punctuation and capitalization. If any of ``punct_label_ids``, + ``capit_label_ids`` parameters is provided, then ``pad_label`` must have ``0`` id in them. In addition, if ``label_vocab_dir`` + is provided, then ``pad_label`` must be on the first lines in files ``class_labels.punct_labels_file`` and + ``class_labels.capit_labels_file``.""" + + ignore_extra_tokens: bool = False + """Whether to compute loss on not first tokens in words. If this parameter is ``True``, then loss mask is ``False`` + for all tokens in a word except the first.""" + + ignore_start_end: bool = True + """If ``False``, then loss is computed on [CLS] and [SEP] tokens.""" + + punct_label_ids: Optional[Dict[str, int]] = None + """A dictionary with punctuation label ids. ``pad_label`` must have ``0`` id in this dictionary. 
You can omit this + parameter and pass label ids through ``class_labels.punct_labels_file`` or let the model to infer label ids from + dataset or load them from checkpoint.""" + + capit_label_ids: Optional[Dict[str, int]] = None + """A dictionary with capitalization label ids. ``pad_label`` must have ``0`` id in this dictionary. You can omit + this parameter and pass label ids through ``class_labels.capit_labels_file`` or let model to infer label ids from + dataset or load them from checkpoint.""" + + label_vocab_dir: Optional[str] = None + """A path to directory which contains class labels files. See :class:`ClassLabelsConfig`. If this parameter is + provided, then labels will be loaded from files which are located in ``label_vocab_dir`` and have names specified + in ``model.class_labels`` configuration section. A label specified in ``pad_label`` has to be on the first lines + of ``model.class_labels`` files.""" + + +@dataclass +class PunctuationCapitalizationModelConfig: + """ + A configuration of + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel` + model. + + See an example of model config in + `nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_ + + This config is a part of :class:`~PunctuationCapitalizationConfig`. + """ + + class_labels: ClassLabelsConfig = ClassLabelsConfig() + """A mandatory parameter containing a dictionary with names of label id files used in .nemo checkpoints. + These file names can also be used for passing label vocabularies to the model. 
If you wish to use ``class_labels`` + for passing vocabularies, please provide path to vocabulary files in + ``model.common_dataset_parameters.label_vocab_dir`` parameter.""" + + common_dataset_parameters: Optional[CommonDatasetParametersConfig] = CommonDatasetParametersConfig() + """Label ids and loss mask information.""" + + train_ds: Optional[PunctuationCapitalizationTrainDataConfig] = None + """A configuration for creating training dataset and data loader.""" + + validation_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None + """A configuration for creating validation datasets and data loaders.""" + + test_ds: Optional[PunctuationCapitalizationEvalDataConfig] = None + """A configuration for creating test datasets and data loaders.""" + + punct_head: HeadConfig = HeadConfig() + """A configuration for creating punctuation MLP head that is applied to language model outputs.""" + + capit_head: HeadConfig = HeadConfig() + """A configuration for creating capitalization MLP head that is applied to language model outputs.""" + + tokenizer: Any = TokenizerConfig() + """A configuration for source text tokenizer.""" + + language_model: LanguageModelConfig = LanguageModelConfig() + """A configuration of a BERT-like language model which serves as a model body.""" + + optim: Optional[Any] = None + """A configuration of optimizer and learning rate scheduler. There is much variability in such config. For + description see `Optimizers + <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/core/core.html#optimizers>`_ section in + documentation and `primer <https://github.com/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb>`_ tutorial.""" + + +@dataclass +class PunctuationCapitalizationConfig(NemoConfig): + """ + A config for punctuation model training and testing.
+ + See an example of full config in + `nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_ + """ + + pretrained_model: Optional[str] = None + """Can be an NVIDIA NGC cloud model or a path to a .nemo checkpoint. You can get a list of possible cloud options + by calling method + :func:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel.list_available_models`. + """ + + name: Optional[str] = 'Punctuation_and_Capitalization' + """A name of the model. Used for naming output directories and ``.nemo`` checkpoints.""" + + do_training: bool = True + """Whether to perform training of the model.""" + + do_testing: bool = False + """Whether to perform testing of the model.""" + + model: PunctuationCapitalizationModelConfig = PunctuationCapitalizationModelConfig() + """A configuration for the + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel` + model.""" + + trainer: Optional[TrainerConfig] = TrainerConfig() + """Contains ``Trainer`` Lightning class constructor parameters.""" + + exp_manager: Optional[ExpManagerConfig] = ExpManagerConfig(name=name, files_to_copy=[]) + """A configuration with various NeMo training options such as output directories, resuming from checkpoint, + tensorboard and W&B logging, and so on. For possible options see :ref:`exp-manager-label`.""" + + +def is_legacy_model_config(model_cfg: DictConfig) -> bool: + """ + Test if model config is old style config. Old style configs are configs which were used before + ``common_dataset_parameters`` item was added. Old style datasets use ``dataset`` instead of + ``common_dataset_parameters``, ``batch_size`` instead of ``tokens_in_batch``. Old style configs do not support + tarred datasets.
+ + Args: + model_cfg: model configuration + + Returns: + whether ``model_config`` is legacy + """ + return 'common_dataset_parameters' not in model_cfg + + +def legacy_model_config_to_new_model_config(model_cfg: DictConfig) -> DictConfig: + """ + Transform old style config into + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig`. + Old style configs are configs which were used before ``common_dataset_parameters`` item was added. Old style + datasets use ``dataset`` instead of ``common_dataset_parameters``, ``batch_size`` instead of ``tokens_in_batch``. + Old style configs do not support tarred datasets. + + Args: + model_cfg: old style config + + Returns: + model config which follows dataclass + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig` + """ + train_ds = model_cfg.get('train_ds') + validation_ds = model_cfg.get('validation_ds') + test_ds = model_cfg.get('test_ds') + dataset = model_cfg.dataset + punct_head_config = model_cfg.get('punct_head', {}) + capit_head_config = model_cfg.get('capit_head', {}) + return OmegaConf.structured( + PunctuationCapitalizationModelConfig( + class_labels=model_cfg.class_labels, + common_dataset_parameters=CommonDatasetParametersConfig( + pad_label=dataset.pad_label, + ignore_extra_tokens=dataset.get( + 'ignore_extra_tokens', CommonDatasetParametersConfig.ignore_extra_tokens + ), + ignore_start_end=dataset.get('ignore_start_end', CommonDatasetParametersConfig.ignore_start_end), + punct_label_ids=model_cfg.punct_label_ids, + capit_label_ids=model_cfg.capit_label_ids, + ), + train_ds=None + if train_ds is None + else legacy_data_config_to_new_data_config(train_ds, dataset, train=True), + validation_ds=None + if validation_ds is None + else legacy_data_config_to_new_data_config(validation_ds, dataset, train=False), + test_ds=None if test_ds is None else 
legacy_data_config_to_new_data_config(test_ds, dataset, train=False), + punct_head=HeadConfig( + num_fc_layers=punct_head_config.get('punct_num_fc_layers', HeadConfig.num_fc_layers), + fc_dropout=punct_head_config.get('fc_dropout', HeadConfig.fc_dropout), + activation=punct_head_config.get('activation', HeadConfig.activation), + use_transformer_init=punct_head_config.get('use_transformer_init', HeadConfig.use_transformer_init), + ), + capit_head=HeadConfig( + num_fc_layers=capit_head_config.get('capit_num_fc_layers', HeadConfig.num_fc_layers), + fc_dropout=capit_head_config.get('fc_dropout', HeadConfig.fc_dropout), + activation=capit_head_config.get('activation', HeadConfig.activation), + use_transformer_init=capit_head_config.get('use_transformer_init', HeadConfig.use_transformer_init), + ), + tokenizer=model_cfg.tokenizer, + language_model=model_cfg.language_model, + optim=model_cfg.optim, + ) + ) diff --git a/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py --- a/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py +++ b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,23 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os +import copy from math import ceil -from typing import Dict, List, Optional, Tuple +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch +from numpy.typing import ArrayLike from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Trainer +from pytorch_lightning.utilities.types import EPOCH_OUTPUT from tqdm import tqdm from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss +from nemo.collections.common.metrics import GlobalAverageLossMetric from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import ( BertPunctuationCapitalizationDataset, + PunctuationCapitalizationEvalDataConfig, + PunctuationCapitalizationTrainDataConfig, + load_label_ids, + raise_not_equal_labels_error, +) +from nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset import ( BertPunctuationCapitalizationInferDataset, ) +from nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset import ( + BertPunctuationCapitalizationTarredDataset, +) from nemo.collections.nlp.metrics.classification_report import ClassificationReport from nemo.collections.nlp.models.nlp_model import NLPModel +from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import ( + is_legacy_model_config, + legacy_model_config_to_new_model_config, +) from nemo.collections.nlp.modules.common import TokenClassifier from nemo.collections.nlp.modules.common.lm_utils import get_lm_model from nemo.core.classes.common import PretrainedModelInfo, typecheck @@ -40,25 +57,59 @@ class PunctuationCapitalizationModel(NLPModel, Exportable): + """ + A model for restoring punctuation and capitalization in text. The model is usually used together with ASR model + because ASR models often return text without punctuation and capitalization. 
+ + The model consists of a language model and two multilayer perceptrons (MLP) on top the language model. The first + MLP serves for punctuation prediction and the second is for capitalization prediction. You can use only BERT-like + HuggingFace language models (model ``forward`` method accepts ``input_ids``, ``token_types_ids``, + ``attention_mask`` arguments). See more about model config options :ref:`here<model-config-label>`. + + Use method :meth:`~add_punctuation_capitalization` for model inference. + + For training and testing use dataset + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`, + for training on huge amounts of data which cannot be loaded into memory simultaneously use + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`. + + Args: + cfg (:obj:`DictConfig`): a model configuration. It should follow dataclass + :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_config.PunctuationCapitalizationModelConfig` + See an example of full config in + `nemo/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml + <https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml>`_ + trainer (:obj:`pytorch_lightning.Trainer`): an instance of a PyTorch Lightning trainer + """ + @property def input_types(self) -> Optional[Dict[str, NeuralType]]: + """Neural types of a :meth:`forward` method input.""" return self.bert_model.input_types @property def output_types(self) -> Optional[Dict[str, NeuralType]]: + """Neural types of a :meth:`forward` method output.""" return { "punct_logits": NeuralType(('B', 'T', 'C'), LogitsType()), "capit_logits": NeuralType(('B', 'T', 'C'), LogitsType()), } - def __init__(self, cfg: DictConfig, trainer: Trainer = None): - """ - Initializes BERT Punctuation 
and Capitalization model. - """ + def __init__(self, cfg: DictConfig, trainer: Trainer = None) -> None: + """Initializes BERT Punctuation and Capitalization model.""" + if is_legacy_model_config(cfg): + cfg = legacy_model_config_to_new_model_config(cfg) self.setup_tokenizer(cfg.tokenizer) - + self.world_size = 1 + if trainer is not None: + self.world_size = trainer.num_nodes * trainer.num_gpus + self.metrics = None + self.label_ids_are_set = False + self.punct_label_ids = None + self.capit_label_ids = None super().__init__(cfg=cfg, trainer=trainer) - + if not self.label_ids_are_set: + self._set_label_ids() self.bert_model = get_lm_model( pretrained_model_name=cfg.language_model.pretrained_model_name, config_file=self.register_artifact('language_model.config_file', cfg.language_model.config_file), @@ -69,46 +120,51 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): self.punct_classifier = TokenClassifier( hidden_size=self.bert_model.config.hidden_size, - num_classes=len(self._cfg.punct_label_ids), + num_classes=len(self.punct_label_ids), activation=cfg.punct_head.activation, log_softmax=False, dropout=cfg.punct_head.fc_dropout, - num_layers=cfg.punct_head.punct_num_fc_layers, + num_layers=cfg.punct_head.num_fc_layers, use_transformer_init=cfg.punct_head.use_transformer_init, ) self.capit_classifier = TokenClassifier( hidden_size=self.bert_model.config.hidden_size, - num_classes=len(self._cfg.capit_label_ids), + num_classes=len(self.capit_label_ids), activation=cfg.capit_head.activation, log_softmax=False, dropout=cfg.capit_head.fc_dropout, - num_layers=cfg.capit_head.capit_num_fc_layers, + num_layers=cfg.capit_head.num_fc_layers, use_transformer_init=cfg.capit_head.use_transformer_init, ) self.loss = CrossEntropyLoss(logits_ndim=3) self.agg_loss = AggregatorLoss(num_inputs=2) - # setup to track metrics - self.punct_class_report = ClassificationReport( - num_classes=len(self._cfg.punct_label_ids), - label_ids=self._cfg.punct_label_ids, - mode='macro', - 
dist_sync_on_step=True, - ) - self.capit_class_report = ClassificationReport( - num_classes=len(self._cfg.capit_label_ids), - label_ids=self._cfg.capit_label_ids, - mode='macro', - dist_sync_on_step=True, - ) - @typecheck() - def forward(self, input_ids, attention_mask, token_type_ids=None): + def forward( + self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, torch.Tensor]: """ - No special modification required for Lightning, define it as you normally would - in the `nn.Module` in vanilla PyTorch. + Executes a forward pass through the model. For more details see ``forward`` method of HuggingFace BERT-like + (models which accept ``input_ids``, ``attention_mask``, ``token_type_ids`` arguments) models. + + Args: + input_ids (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]``. Contains encoded + source tokens. + attention_mask (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Contains an + attention mask for excluding paddings. + token_type_ids (:obj:`torch.Tensor`): an integer torch Tensor of shape ``[Batch, Time]``. Contains an index + of segment to which a token belongs. If ``token_type_ids`` is not ``None``, then it should be a zeros + tensor. 
+ + Returns: + :obj:`Tuple[torch.Tensor, torch.Tensor]`: a tuple containing + + - ``punct_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape + ``[Batch, Time, NumPunctuationLabels]`` containing punctuation logits + - ``capit_logits`` (:obj:`torch.Tensor`): a float torch tensor of shape + ``[Batch, Time, NumCapitalizationLabels]`` containing capitalization logits """ hidden_states = self.bert_model( input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask @@ -117,251 +173,677 @@ def forward(self, input_ids, attention_mask, token_type_ids=None): capit_logits = self.capit_classifier(hidden_states=hidden_states) return punct_logits, capit_logits - def _make_step(self, batch): - input_ids, input_type_ids, input_mask, subtokens_mask, loss_mask, punct_labels, capit_labels = batch + def _make_step(self, batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: punct_logits, capit_logits = self( - input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask + input_ids=batch['input_ids'], token_type_ids=batch['segment_ids'], attention_mask=batch['input_mask'] ) - punct_loss = self.loss(logits=punct_logits, labels=punct_labels, loss_mask=loss_mask) - capit_loss = self.loss(logits=capit_logits, labels=capit_labels, loss_mask=loss_mask) + punct_loss = self.loss(logits=punct_logits, labels=batch['punct_labels'], loss_mask=batch['loss_mask']) + capit_loss = self.loss(logits=capit_logits, labels=batch['capit_labels'], loss_mask=batch['loss_mask']) loss = self.agg_loss(loss_1=punct_loss, loss_2=capit_loss) return loss, punct_logits, capit_logits - def training_step(self, batch, batch_idx): + def training_step(self, batch: Dict[str, torch.Tensor], batch_idx: int) -> Dict[str, Union[torch.Tensor, float]]: """ - Lightning calls this inside the training loop with the data from the training dataloader - passed in as `batch`. 
+ Lightning calls this inside the training loop with the data from the training dataloader passed in as + ``batch``. + + Args: + batch: a dictionary with following + items: + + - ``'input_ids'`` (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]`` containing + encoded source text + - ``'segment_ids'`` (:obj:`torch.Tensor`): a zeros integer torch tensor of shape ``[Batch, Time]`` + - ``'input_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Serves as + attention mask. should be ``False`` on padding tokens and ``True`` on other tokens. + - ``'loss_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Which token + to compute loss on. See more details in description of parameters ``ignore_start_end`` and + ``ignore_extra_tokens`` of a class + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset` + - ``'punct_labels'`` (:obj:`torch.Tensor`): a ``long`` torch tensor of shape ``[Batch, Time]``. + Contains encoded punctuation labels + - ``'capit_labels'`` (:obj:`torch.Tensor`): a ``long`` torch tensor of shape ``[Batch, Time]``. + Contains encoded capitalization labels + - ``'subtokens_mask'`` (:obj:`torch.Tensor`): not required for training and can be omitted + + batch_idx (:obj:`int`): an index of batch. 
Mandatory Lightning parameter + + Returns: + :obj:`Dict[str, Union[torch.Tensor, float]]`: a dictionary with 2 items: + + - ``'loss'`` (:obj:`torch.Tensor`): torch tensor containing mean aggregated punctuation and + capitalization loss + - ``'lr'`` (:obj:`float`): a float containing learning rate """ loss, _, _ = self._make_step(batch) lr = self._optimizer.param_groups[0]['lr'] - self.log('lr', lr, prog_bar=True) self.log('train_loss', loss) - return {'loss': loss, 'lr': lr} - def validation_step(self, batch, batch_idx, dataloader_idx=0): - """ - Lightning calls this inside the validation loop with the data from the validation dataloader - passed in as `batch`. + def eval_step(self, batch: Dict[str, torch.Tensor], mode: str, dataloader_idx: int) -> Dict[str, None]: """ - _, _, _, subtokens_mask, _, punct_labels, capit_labels = batch - val_loss, punct_logits, capit_logits = self._make_step(batch) + A method called by :meth:`validation_step` and :meth:`test_step`. Performs forward pass and updates metrics. - subtokens_mask = subtokens_mask > 0.5 - punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask] - punct_labels = punct_labels[subtokens_mask] - self.punct_class_report.update(punct_preds, punct_labels) + Args: + batch (:obj:`Dict[str, torch.Tensor]`): a dictionary with following items: + + - ``'input_ids'`` (:obj:`torch.Tensor`): an integer torch tensor of shape ``[Batch, Time]`` containing + encoded source text. + - ``'subtokens_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. An + element of this item is ``True`` if corresponding token from ``'input_ids'`` element is the first + token in some word. + - ``'segment_ids'`` (:obj:`torch.Tensor`): a zeros integer torch tensor of shape ``[Batch, Time]``. + - ``'input_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Serves as + attention mask. should be ``False`` on padding tokens and ``True`` on other tokens. 
+ - ``'loss_mask'`` (:obj:`torch.Tensor`): a boolean torch tensor of shape ``[Batch, Time]``. Which token + to compute loss on. See more details in description of parameters ``ignore_start_end`` and + ``ignore_extra_tokens`` of class + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`. + - ``'punct_labels'`` (:obj:`torch.Tensor`): a long torch tensor of shape ``[Batch, Time]``. Contains + encoded punctuation labels. + - ``'capit_labels'`` (:obj:`torch.Tensor`): a long torch tensor of shape ``[Batch, Time]``. Contains + encoded capitalization labels. + mode: either ``'validation'`` or ``'test'`` depending on caller method. + dataloader_idx: NeMo parameter for multi dataset validation. + Returns: + :obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``, + ``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using + ``torchmetrics``. 
+ """ + loss, punct_logits, capit_logits = self._make_step(batch) + subtokens_mask = batch['subtokens_mask'] + punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask] + punct_labels = batch['punct_labels'][subtokens_mask] capit_preds = torch.argmax(capit_logits, axis=-1)[subtokens_mask] - capit_labels = capit_labels[subtokens_mask] - self.capit_class_report.update(capit_preds, capit_labels) + capit_labels = batch['capit_labels'][subtokens_mask] + self.metrics[mode]['loss'][dataloader_idx]( + loss=loss, num_measurements=batch['loss_mask'].sum().to(loss.device) + ) + self.metrics[mode]['punct_class_report'][dataloader_idx](punct_preds, punct_labels) + self.metrics[mode]['capit_class_report'][dataloader_idx](capit_preds, capit_labels) + # torchmetrics are used for metrics computation + return {'loss': None, 'punct_class_report': None, 'capit_class_report': None} + + def validation_step( + self, batch: Dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0 + ) -> Dict[str, None]: + """ + Lightning calls this inside the validation loop with the data from the validation dataloader passed in as + ``batch``. See more details in :meth:`eval_step`. - return { - 'val_loss': val_loss, - 'punct_tp': self.punct_class_report.tp, - 'punct_fn': self.punct_class_report.fn, - 'punct_fp': self.punct_class_report.fp, - 'capit_tp': self.capit_class_report.tp, - 'capit_fn': self.capit_class_report.fn, - 'capit_fp': self.capit_class_report.fp, - } + Args: + batch (:obj:`dict`): see :meth:`eval_step` for the ``batch`` parameter explanation + batch_idx (:obj:`int`): an index of a batch in a dataset. A mandatory Lightning parameter + dataloader_idx (:obj:`int`): a NeMo parameter for performing testing on multiple datasets - def test_step(self, batch, batch_idx, dataloader_idx=0): - """ - Lightning calls this inside the validation loop with the data from the validation dataloader - passed in as `batch`. 
+ Returns: + :obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``, + ``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using + ``torchmetrics``. """ - _, _, _, subtokens_mask, _, punct_labels, capit_labels = batch - test_loss, punct_logits, capit_logits = self._make_step(batch) - - subtokens_mask = subtokens_mask > 0.5 - punct_preds = torch.argmax(punct_logits, axis=-1)[subtokens_mask] - punct_labels = punct_labels[subtokens_mask] - self.punct_class_report.update(punct_preds, punct_labels) + return self.eval_step(batch, 'val', dataloader_idx) - capit_preds = torch.argmax(capit_logits, axis=-1)[subtokens_mask] - capit_labels = capit_labels[subtokens_mask] - self.capit_class_report.update(capit_preds, capit_labels) + def test_step(self, batch: Dict[str, torch.Tensor], batch_idx: int, dataloader_idx: int = 0) -> Dict[str, None]: + """ + Lightning calls this inside the test loop with the data from the test dataloader passed in as ``batch``. + See more details in :meth:`eval_step`. - return { - 'test_loss': test_loss, - 'punct_tp': self.punct_class_report.tp, - 'punct_fn': self.punct_class_report.fn, - 'punct_fp': self.punct_class_report.fp, - 'capit_tp': self.capit_class_report.tp, - 'capit_fn': self.capit_class_report.fn, - 'capit_fp': self.capit_class_report.fp, - } + Args: + batch (:obj:`dict`): see :meth:`eval_step` for the ``batch`` parameter explanation + batch_idx (:obj:`int`): an index of a batch in a dataset. A mandatory Lightning parameter + dataloader_idx (:obj:`int`): a NeMo parameter for performing testing on multiple datasets - def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0): + Returns: + :obj:`Dict[str, None]`: a dictionary containing items ``'loss'``, ``'punct_class_report'``, + ``'capit_class_report'`` which values are ``None``. Values are ``None`` because metrics are computed using + ``torchmetrics``. 
""" - Called at the end of validation to aggregate outputs. - outputs: list of individual outputs of each validation step. + return self.eval_step(batch, 'test', dataloader_idx) + + def training_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: """ - avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean() + Called at the end of training epoch. This method properly shuffles + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`. + Regular data loader shuffling only permutes batches. - # calculate metrics and log classification report for Punctuation task - punct_precision, punct_recall, punct_f1, punct_report = self.punct_class_report.compute() + Args: + outputs (:obj:`pytorch_lightning.utilities.types.EPOCH_OUTPUT`): an output of all training steps. It is a + mandatory PyTorch Lightning parameter and it is not used in this method + """ + shuffle = self._cfg.train_ds.get('shuffle') + if shuffle is None: # Encountered legacy config + shuffle = not self.cfg.train_ds.get('use_tarred_dataset', False) + if shuffle: + if isinstance(self.train_dataloader().dataset, BertPunctuationCapitalizationDataset): + self.train_dataloader().dataset.repack_batches_with_shuffle() + + def _multi_eval_epoch_end(self, mode: str, dataloader_idx: int) -> Dict[str, Dict[str, torch.Tensor]]: + loss = self.metrics[mode]['loss'][dataloader_idx].compute() + self.metrics[mode]['loss'][dataloader_idx].reset() + + punct_res = self.metrics[mode]['punct_class_report'][dataloader_idx].compute() + punct_precision, punct_recall, punct_f1, punct_report = punct_res + self.metrics[mode]['punct_class_report'][dataloader_idx].reset() + + capit_res = self.metrics[mode]['capit_class_report'][dataloader_idx].compute() + capit_precision, capit_recall, capit_f1, capit_report = capit_res + self.metrics[mode]['capit_class_report'][dataloader_idx].reset() + log_dict = { + 'log': { + f'{mode}_loss': loss, + f'{mode}_punct_precision': 
punct_precision, + f'{mode}_punct_f1': punct_f1, + f'{mode}_punct_recall': punct_recall, + f'{mode}_capit_precision': capit_precision, + f'{mode}_capit_f1': capit_f1, + f'{mode}_capit_recall': capit_recall, + } + } logging.info(f'Punctuation report: {punct_report}') - - # calculate metrics and log classification report for Capitalization task - capit_precision, capit_recall, capit_f1, capit_report = self.capit_class_report.compute() logging.info(f'Capitalization report: {capit_report}') + return log_dict - self.log('val_loss', avg_loss, prog_bar=True) - self.log('punct_precision', punct_precision) - self.log('punct_f1', punct_f1) - self.log('punct_recall', punct_recall) - self.log('capit_precision', capit_precision) - self.log('capit_f1', capit_f1) - self.log('capit_recall', capit_recall) - - self.punct_class_report.reset() - self.capit_class_report.reset() - - def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0): + def multi_validation_epoch_end(self, outputs: Any, dataloader_idx: int = 0) -> Dict[str, Dict[str, torch.Tensor]]: """ - Called at the end of test to aggregate outputs. - outputs: list of individual outputs of each validation step. + Called at the end of validation to compute and log metrics. 
""" - avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() - - # calculate metrics and log classification report for Punctuation task - punct_precision, punct_recall, punct_f1, punct_report = self.punct_class_report.compute() - logging.info(f'Punctuation report: {punct_report}') - - # calculate metrics and log classification report for Capitalization task - capit_precision, capit_recall, capit_f1, capit_report = self.capit_class_report.compute() - logging.info(f'Capitalization report: {capit_report}') + return self._multi_eval_epoch_end('val', dataloader_idx) - self.log('test_loss', avg_loss, prog_bar=True) - self.log('punct_precision', punct_precision) - self.log('punct_f1', punct_f1) - self.log('punct_recall', punct_recall) - self.log('capit_precision', capit_precision) - self.log('capit_f1', capit_f1) - self.log('capit_recall', capit_recall) + def multi_test_epoch_end(self, outputs: Any, dataloader_idx: int = 0) -> Dict[str, Dict[str, torch.Tensor]]: + """ + Called at the end of model testing to compute and log metrics. + """ + return self._multi_eval_epoch_end('test', dataloader_idx) - def update_data_dir(self, data_dir: str) -> None: + def update_config_after_restoring_from_checkpoint(self, **kwargs) -> None: + """ + Set new values for some sections of config. Useful after restoring from checkpoint for fine tuning + and testing if config parameters of a restored checkpoint are not suitable. + + For ``class_labels``, ``common_dataset_parameters``, ``train_ds``, ``validation_ds``, ``test_ds``, there is + no need to provide values for all items in an updated config section. If an item is omitted in this method + parameter, then corresponding item in model config does not change. + + If the entire updated section is missing in the model config, then omitted items from this method parameters + are set according to default values listed + :ref:`here <run-config-label>`. + + .. warning:: + Parameter ``optim`` is processed in a special way. 
``optim`` contents are used not for updating of + model config, but for replacement of entire config section. + + If one of parameters ``train_ds``, ``validation_ds``, ``test_ds``, is provided but its value is + ``None``, then corresponding section is replaced with ``None``. + + .. warning:: + You may change values of parameters related to label ids: + + - ``common_dataset_parameters.punct_label_ids``, + - ``common_dataset_parameters.capit_label_ids``, + - ``common_dataset_parameters.label_vocab_dir``, + - ``class_labels.punct_labels_file``, + - ``class_labels.capit_labels_file``, + + yet label ids in these parameters must be equal to label ids loaded from checkpoint. Otherwise, + an error will be raised. + + Keyword Args: + class_labels (:obj:`Union[DictConfig, Dict[str, str]]`): names of label id files used as label + id dictionaries. See more in :ref:`class labels config<class-labels-config-label>`. + common_dataset_parameters (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): see more in + :ref:`common dataset parameters config<common-dataset-parameters-config-label>`. + train_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of training dataset. See + possible options in :ref:`data config<data-config-label>`. + validation_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of validation + dataset. See possible options in :ref:`data config<data-config-label>`. + test_ds (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): configuration of test dataset. See + possible options in :ref:`data config<data-config-label>`. + optim (:obj:`Union[DictConfig, Dict[str, Any]]`, `optional`): optimization configuration. See possible + options in :ref:`optimization<optimization-label>` and in `primer + <https://github.com/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb>`_ tutorial. 
+ """ + allowed_keys = {'class_labels', 'common_dataset_parameters', 'train_ds', 'validation_ds', 'test_ds', 'optim'} + unexpected_keys = set(kwargs) - allowed_keys + if unexpected_keys: + raise ValueError( + f"Found unexpected keyword arguments: {unexpected_keys}. You can use only {allowed_keys}." + ) + if 'class_labels' in kwargs: + if kwargs['class_labels'] is None: + raise ValueError( + f"'class_labels' parameters is `None`, whereas you cannot remove section 'class_labels' from model " + f"config." + ) + self._cfg.class_labels = OmegaConf.merge(self._cfg.class_labels, OmegaConf.create(kwargs['class_labels'])) + if 'common_dataset_parameters' in kwargs: + if kwargs['common_dataset_parameters'] is None: + raise ValueError( + f"'common_dataset_parameters' item is `None`, whereas you cannot remove section" + f"'common_dataset_parameters' from model config." + ) + self._cfg.common_dataset_parameters = OmegaConf.merge( + self._cfg.common_dataset_parameters, OmegaConf.create(kwargs['common_dataset_parameters']) + ) + self._check_label_config_parameters() + if 'train_ds' in kwargs: + if kwargs['train_ds'] is None: + self._cfg.train_ds = None + else: + if 'train_ds' in self._cfg and self._cfg.train_ds is not None: + base = self._cfg.train_ds + else: + base = OmegaConf.structured(PunctuationCapitalizationTrainDataConfig) + self._cfg.train_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['train_ds'])) + if 'validation_ds' in kwargs: + if kwargs['validation_ds'] is None: + self._cfg.validation_ds = None + else: + if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None: + base = self._cfg.validation_ds + else: + base = OmegaConf.structured(PunctuationCapitalizationEvalDataConfig) + self._cfg.validation_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['validation_ds'])) + if 'test_ds' in kwargs: + if kwargs['test_ds'] is None: + self._cfg.test_ds = None + else: + if 'test_ds' in self._cfg and self._cfg.test_ds is not None: + base = self._cfg.test_ds + 
else: + base = OmegaConf.structured(PunctuationCapitalizationEvalDataConfig) + self._cfg.test_ds = OmegaConf.merge(base, OmegaConf.create(kwargs['test_ds'])) + if 'optim' in kwargs: + self._cfg.optim = kwargs['optim'] + + def setup_training_data(self, train_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None: """ - Update data directory + Sets up training data: creates dataset and sets data loader. If parameter ``train_data_config`` is not + provided, then :ref:`config<model-config-label>` section ``train_ds`` will be used. Args: - data_dir: path to data directory + train_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain + only fields present in :ref:`data config<data-config-label>`. + If some of the fields are missing, then they will be set according to + :ref:`data config<data-config-label>` defaults. If ``train_data_config`` parameter is not set, then + ``train_ds`` item of model config is used. Here model config is a configuration used for model + instantiation. 
""" - if os.path.exists(data_dir): - logging.info(f'Setting model.dataset.data_dir to {data_dir}.') - self._cfg.dataset.data_dir = data_dir - else: - raise ValueError(f'{data_dir} not found') - - def setup_training_data(self, train_data_config: Optional[DictConfig] = None): - """Setup training data""" + if train_data_config is not None: + train_data_config = OmegaConf.create(train_data_config) + train_data_config = OmegaConf.merge( + OmegaConf.structured(PunctuationCapitalizationTrainDataConfig), train_data_config + ) if train_data_config is None: train_data_config = self._cfg.train_ds - # for older(pre - 1.0.0.b3) configs compatibility - if not hasattr(self._cfg, "class_labels") or self._cfg.class_labels is None: - OmegaConf.set_struct(self._cfg, False) - self._cfg.class_labels = {} - self._cfg.class_labels = OmegaConf.create( - {'punct_labels_file': 'punct_label_ids.csv', 'capit_labels_file': 'capit_label_ids.csv'} - ) - - self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config) - + self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, train=True) + self.punct_label_ids = self._train_dl.dataset.punct_label_ids.copy() + self.capit_label_ids = self._train_dl.dataset.capit_label_ids.copy() + self.label_ids_are_set = True if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: - self.register_artifact('class_labels.punct_labels_file', self._train_dl.dataset.punct_label_ids_file) - self.register_artifact('class_labels.capit_labels_file', self._train_dl.dataset.capit_label_ids_file) + label_vocab_dir = self._cfg.common_dataset_parameters.label_vocab_dir + if label_vocab_dir is None: + punct_label_ids_file, capit_label_ids_file = self._train_dl.dataset.save_labels_and_get_file_paths( + self._cfg.class_labels.punct_labels_file, self._cfg.class_labels.capit_labels_file + ) + else: + punct_label_ids_file = Path(label_vocab_dir).expanduser() / self._cfg.class_labels.punct_labels_file + capit_label_ids_file = 
Path(label_vocab_dir).expanduser() / self._cfg.class_labels.capit_labels_file + self.register_artifact('class_labels.punct_labels_file', str(punct_label_ids_file)) + self.register_artifact('class_labels.capit_labels_file', str(capit_label_ids_file)) - # save label maps to the config - self._cfg.punct_label_ids = OmegaConf.create(self._train_dl.dataset.punct_label_ids) - self._cfg.capit_label_ids = OmegaConf.create(self._train_dl.dataset.capit_label_ids) + def _get_eval_metrics_kwargs( + self, + ) -> Tuple[ + Dict[str, bool], + Dict[str, Union[bool, str, int, Dict[str, int]]], + Dict[str, Union[bool, str, int, Dict[str, int]]], + ]: + loss_kw = {'dist_sync_on_step': False, 'take_avg_loss': True} + punct_kw = { + 'num_classes': len(self.punct_label_ids), + 'label_ids': self.punct_label_ids, + 'mode': 'macro', + 'dist_sync_on_step': False, + } + capit_kw = { + 'num_classes': len(self.capit_label_ids), + 'label_ids': self.capit_label_ids, + 'mode': 'macro', + 'dist_sync_on_step': False, + } + return loss_kw, punct_kw, capit_kw + + def _setup_metrics_dictionary(self) -> None: + eval_metrics = torch.nn.ModuleDict( + { + "loss": torch.nn.ModuleList([]), + "punct_class_report": torch.nn.ModuleList([]), + "capit_class_report": torch.nn.ModuleList([]), + } + ) + self.metrics = torch.nn.ModuleDict({"val": eval_metrics, "test": copy.deepcopy(eval_metrics)}) - def setup_validation_data(self, val_data_config: Optional[Dict] = None): + def setup_validation_data(self, val_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None: """ - Setup validaton data + Sets up validation data: creates dataset and sets data loader. If parameter ``val_data_config`` is not + provided, then ``validation_ds`` :ref:`config <model-config-label>` section will be used. Here model config is + a configuration used for model instantiation. 
- val_data_config: validation data config + Args: + val_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain + only fields present in data config :ref:`description<data-config-label>`. + If some of the fields are missing, then they will be set according to data config + :ref:`description<data-config-label>` defaults. If ``val_data_config`` parameter is not set, then + ``validation_ds`` item of model config is used. Here model config is a configuration used for model + instantiation. """ + if val_data_config is not None: + val_data_config = OmegaConf.create(val_data_config) + val_data_config = OmegaConf.merge( + OmegaConf.structured(PunctuationCapitalizationEvalDataConfig), val_data_config + ) + if self.metrics is None: + self._setup_metrics_dictionary() if val_data_config is None: val_data_config = self._cfg.validation_ds - self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config) + self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, train=False) + loss_kw, punct_kw, capit_kw = self._get_eval_metrics_kwargs() + self.metrics['val']['loss'].append(GlobalAverageLossMetric(**loss_kw)) + self.metrics['val']['punct_class_report'].append(ClassificationReport(**punct_kw)) + self.metrics['val']['capit_class_report'].append(ClassificationReport(**capit_kw)) + + def setup_test_data(self, test_data_config: Optional[Union[Dict[str, Any], DictConfig]] = None) -> None: + """ + Sets up test data: creates dataset and sets data loader. If parameter ``test_data_config`` is not + provided, then ``test_ds`` config section will be used. See more about in data config + :ref:`description <data-config-label>` and model config :ref:`description<model-config-label>`. 
- def setup_test_data(self, test_data_config: Optional[Dict] = None): + Args: + test_data_config (:obj:`Union[Dict[str, Any], DictConfig]`, `optional`): a dictionary that should contain + only fields present in data config :ref:`description<data-config-label>`. + If some of the fields are missing, then they will be set according to data config + :ref:`description <data-config-label>` defaults. If ``test_data_config`` parameter is not set, then + ``test_ds`` item of :ref:`model config <model-config-label>` is used. Here model config is a + configuration used for model instantiation. + """ + if test_data_config is not None: + test_data_config = OmegaConf.create(test_data_config) + test_data_config = OmegaConf.merge( + OmegaConf.structured(PunctuationCapitalizationEvalDataConfig), test_data_config + ) + if self.metrics is None: + self._setup_metrics_dictionary() if test_data_config is None: test_data_config = self._cfg.test_ds - self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config) + self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, train=False) + loss_kw, punct_kw, capit_kw = self._get_eval_metrics_kwargs() + self.metrics['test']['loss'].append(GlobalAverageLossMetric(**loss_kw)) + self.metrics['test']['punct_class_report'].append(ClassificationReport(**punct_kw)) + self.metrics['test']['capit_class_report'].append(ClassificationReport(**capit_kw)) + + def _check_label_config_parameters(self) -> None: + """ + Checks that config items ``common_dataset_parameters.punct_label_ids`` and + ``common_dataset_parameters.punct_label_vocab_file``, + ``common_dataset_parameters.capit_label_ids`` and ``common_dataset_parameters.capit_label_vocab_file`` contain + identical label ids. Of course, if any of these parameters is ``None``, then check is not performed. + + In addition, this method checks that ``common_dataset_parameters.pad_label`` has id ``0`` in punctuation and + capitalization label ids. 
+ """ + pli = self._cfg.common_dataset_parameters.punct_label_ids + cli = self._cfg.common_dataset_parameters.capit_label_ids + pad_label = self._cfg.common_dataset_parameters.pad_label + plvf, clvf = self._extract_label_vocab_files_from_config() + for label_ids, label_vocab_file, already_set_label_ids, label_ids_name, label_vocab_name in [ + (pli, plvf, self.punct_label_ids, 'punct_label_ids', 'punct_label_vocab_file'), + (cli, clvf, self.capit_label_ids, 'capit_label_ids', 'capit_label_vocab_file'), + ]: + if label_vocab_file is not None: + file_label_ids = load_label_ids(label_vocab_file) + if label_ids is not None and label_vocab_file is not None: + if label_ids != file_label_ids: + raise_not_equal_labels_error( + first_labels=label_ids, + second_labels=file_label_ids, + first_labels_desc=f"Labels passed in config parameter " + f"`model.common_dataset_parameters.{label_ids_name}`", + second_labels_desc=f"Labels loaded from file {plvf} passed in config " + f"parameter `model.common_dataset_parameters.{label_vocab_name}", + ) + if already_set_label_ids is not None: + config_label_ids = label_ids if label_vocab_file is None else file_label_ids + if config_label_ids is not None: + if label_vocab_file is None: + config_label_ids_source = ( + f"Labels passed in config parameter `model.common_dataset_parameters.{label_ids_name}`" + ) + else: + config_label_ids_source = ( + f"Labels loaded from file {plvf} passed in config parameter " + f"`model.common_dataset_parameters.{label_vocab_name}`" + ) + if already_set_label_ids != config_label_ids: + raise_not_equal_labels_error( + first_labels=config_label_ids, + second_labels=already_set_label_ids, + first_labels_desc=config_label_ids_source, + second_labels_desc=f"Labels which are already set in an attribute " + f"`PunctuationCapitalizationModel.{label_ids_name}`", + ) + if plvf is not None: + pli = load_label_ids(plvf) + if clvf is not None: + cli = load_label_ids(clvf) + for label_ids, parameter_name in [ + (pli, 
'punct_label_vocab_file' if pli is None else 'punct_label_ids'), + (cli, 'capit_label_vocab_file' if cli is None else 'capit_label_ids'), + ]: + if label_ids is not None and label_ids[pad_label] != 0: + raise ValueError( + f"Pad label '{pad_label}' has non zero id {label_ids[pad_label]} in " + f"`model.common_dataset_parameters.{parameter_name}`." + ) - def _setup_dataloader_from_config(self, cfg: DictConfig): - # use data_dir specified in the ds_item to run evaluation on multiple datasets - if 'ds_item' in cfg and cfg.ds_item is not None: - data_dir = cfg.ds_item + def _extract_label_vocab_files_from_config(self) -> Tuple[Optional[Path], Optional[Path]]: + if self._cfg.common_dataset_parameters.label_vocab_dir is None: + if self._is_model_being_restored(): + punct_label_vocab_file = self._cfg.class_labels.punct_labels_file + capit_label_vocab_file = self._cfg.class_labels.capit_labels_file + else: + punct_label_vocab_file, capit_label_vocab_file = None, None else: - data_dir = self._cfg.dataset.data_dir - - text_file = os.path.join(data_dir, cfg.text_file) - label_file = os.path.join(data_dir, cfg.labels_file) - - dataset = BertPunctuationCapitalizationDataset( - tokenizer=self.tokenizer, - text_file=text_file, - label_file=label_file, - pad_label=self._cfg.dataset.pad_label, - punct_label_ids=self._cfg.punct_label_ids, - capit_label_ids=self._cfg.capit_label_ids, - max_seq_length=self._cfg.dataset.max_seq_length, - ignore_extra_tokens=self._cfg.dataset.ignore_extra_tokens, - ignore_start_end=self._cfg.dataset.ignore_start_end, - use_cache=self._cfg.dataset.use_cache, - num_samples=cfg.num_samples, - punct_label_ids_file=self._cfg.class_labels.punct_labels_file - if 'class_labels' in self._cfg - else 'punct_label_ids.csv', - capit_label_ids_file=self._cfg.class_labels.capit_labels_file - if 'class_labels' in self._cfg - else 'capit_label_ids.csv', - ) + label_vocab_dir = Path(self._cfg.common_dataset_parameters.label_vocab_dir).expanduser() + 
punct_label_vocab_file = label_vocab_dir / self._cfg.class_labels.punct_labels_file + capit_label_vocab_file = label_vocab_dir / self._cfg.class_labels.capit_labels_file + return punct_label_vocab_file, capit_label_vocab_file + + def _set_label_ids(self) -> None: + """ + Set model attributes ``punct_label_ids`` and ``capit_label_ids`` based on label ids passed in config + item ``common_dataset_parameters``. + + This method also registers artifacts ``class_labels.punct_labels_file`` and ``class_labels.capit_labels_file``. + + This method is called if do not plan to infer label ids from training file with labels. If training file + with labels is going to be used, then calling :meth:`~setup_training_data` is enough to set + ``punct_label_ids`` and ``capit_label_ids`` and register label artifacts. + """ + punct_label_vocab_file, capit_label_vocab_file = self._extract_label_vocab_files_from_config() + if punct_label_vocab_file is not None: + punct_labels_file = self.register_artifact('class_labels.punct_labels_file', str(punct_label_vocab_file)) + if punct_labels_file is None: + logging.warning( + f"The artifact `class_labels.punct_labels_file` was not found in checkpoint. Will rely on " + f"`punct_label_ids` parameter" + ) + self.punct_label_ids = self._cfg.common_dataset_parameters.punct_label_ids + else: + self.punct_label_ids = load_label_ids( + self.register_artifact('class_labels.punct_labels_file', str(punct_label_vocab_file)) + ) + elif self._cfg.common_dataset_parameters.punct_label_ids is not None: + self.punct_label_ids = self._cfg.common_dataset_parameters.punct_label_ids + else: + raise ValueError( + f"Could not set attribute `punct_label_ids`. Config parameters " + f"`model.common_dataset_parameters.punct_label_ids`, " + f"`model.common_dataset_parameters.punct_label_vocab_file` are not set. Another way to set " + f"`punct_label_ids` is calling method `setup_training_data`. That way punctuation label ids will be " + f"inferred from training set." 
+ ) + if capit_label_vocab_file is not None: + capit_labels_file = self.register_artifact('class_labels.capit_labels_file', str(capit_label_vocab_file)) + if capit_labels_file is None: + logging.warning( + f"The artifact `class_labels.capit_labels_file` was not found in checkpoint. Will rely on " + f"`capit_label_ids` parameter" + ) + self.capit_label_ids = self._cfg.common_dataset_parameters.capit_label_ids + else: + self.capit_label_ids = load_label_ids( + self.register_artifact('class_labels.capit_labels_file', str(capit_label_vocab_file)) + ) + elif self._cfg.common_dataset_parameters.capit_label_ids is not None: + self.capit_label_ids = self._cfg.common_dataset_parameters.capit_label_ids + else: + raise ValueError( + f"Could not set attribute `capit_label_ids`. Config parameters " + f"`model.common_dataset_parameters.capit_label_ids`, " + f"`model.common_dataset_parameters.capit_label_vocab_file` are not set. Another way to set " + f"`capit_label_ids` is calling method `setup_training_data`. That way capitalization label ids will " + f"be inferred from training set." + ) + self.label_ids_are_set = True + + def _setup_dataloader_from_config(self, cfg: DictConfig, train: bool) -> torch.utils.data.DataLoader: + """ + Creates dataset and data loader according to config ``cfg``. If ``train=False`` and attributes + ``punct_label_ids`` and ``capit_label_ids`` are not set, then this method sets the attributes and registers + label artifacts. + Args: + cfg (:obj:`DictConfig`): a config which follows dataclass + :class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.PunctuationCapitalizationEvalDataConfig` + Note that list ``ds_item`` is not supported because list ``ds_item`` is unpacked by NeMo core + instruments + train (:obj:`bool`): whether train data is set. 
If ``True``, then label ids are not set in this function + """ + self._check_label_config_parameters() + if not self.label_ids_are_set and not train: + self._set_label_ids() + if cfg.use_tarred_dataset: + if cfg.tar_metadata_file is None: + raise ValueError( + f"If parameter `use_tarred_dataset` is `True`, then a field `tar_metadata_file` has to be a path " + f"to tarred dataset metadata file, whereas `None` is given." + ) + tar_metadata_file = Path(cfg.ds_item) / cfg.tar_metadata_file + dataset = BertPunctuationCapitalizationTarredDataset( + metadata_file=tar_metadata_file, + tokenizer=self.tokenizer, + pad_label=self._cfg.common_dataset_parameters.pad_label, + ignore_extra_tokens=self._cfg.common_dataset_parameters.ignore_extra_tokens, + ignore_start_end=self._cfg.common_dataset_parameters.ignore_start_end, + world_size=self.world_size, + global_rank=self.global_rank, + shuffle_n=cfg.tar_shuffle_n, + label_info_save_dir=cfg.label_info_save_dir, + ) + dataset.check_for_label_consistency_with_model_config( + self.punct_label_ids, + self.capit_label_ids, + self._cfg.class_labels, + self._cfg.common_dataset_parameters, + ) + else: + if cfg.text_file is None or cfg.labels_file is None: + raise ValueError( + f"If parameter `use_tarred_dataset` is `False`, then fields `text_file` and `labels_file` in " + f"dataset config must not be `None`. Whereas `text_file={cfg.text_file}` and " + f"`label_file={cfg.labels_file}`." + ) + if cfg.tokens_in_batch is None: + raise ValueError( + f"If `use_tarred_dataset` is `False`, then you need to provide `tokens_in_batch` parameter." 
+ ) + text_file, labels_file = Path(cfg.ds_item) / cfg.text_file, Path(cfg.ds_item) / cfg.labels_file + if self.label_ids_are_set: + label_kwargs = {'punct_label_ids': self.punct_label_ids, 'capit_label_ids': self.capit_label_ids} + else: + punct_label_vocab_file, capit_label_vocab_file = self._extract_label_vocab_files_from_config() + label_kwargs = { + 'punct_label_ids': self._cfg.common_dataset_parameters.punct_label_ids, + 'capit_label_ids': self._cfg.common_dataset_parameters.capit_label_ids, + 'punct_label_vocab_file': punct_label_vocab_file, + 'capit_label_vocab_file': capit_label_vocab_file, + } + dataset = BertPunctuationCapitalizationDataset( + tokenizer=self.tokenizer, + text_file=text_file, + labels_file=labels_file, + pad_label=self._cfg.common_dataset_parameters.pad_label, + **label_kwargs, + max_seq_length=cfg.max_seq_length, + ignore_extra_tokens=self._cfg.common_dataset_parameters.ignore_extra_tokens, + ignore_start_end=self._cfg.common_dataset_parameters.ignore_start_end, + use_cache=cfg.use_cache, + num_samples=cfg.num_samples, + tokens_in_batch=cfg.tokens_in_batch, + n_jobs=cfg.n_jobs, + verbose=cfg.verbose, + get_label_frequencies=cfg.get_label_frequences, + cache_dir=cfg.cache_dir, + label_info_save_dir=cfg.label_info_save_dir, + ) + if cfg.shuffle and cfg.use_tarred_dataset: + logging.warning(f"Shuffling in dataloader is not supported for tarred dataset.") + shuffle = False + else: + shuffle = cfg.shuffle return torch.utils.data.DataLoader( dataset=dataset, collate_fn=dataset.collate_fn, - batch_size=cfg.batch_size, - shuffle=cfg.shuffle, - num_workers=self._cfg.dataset.num_workers, - pin_memory=self._cfg.dataset.pin_memory, - drop_last=self._cfg.dataset.drop_last, + batch_size=1, + shuffle=shuffle, + num_workers=cfg.num_workers, + pin_memory=cfg.pin_memory, + drop_last=cfg.drop_last, + persistent_workers=cfg.persistent_workers, ) def _setup_infer_dataloader( - self, queries: List[str], batch_size: int, max_seq_length: int, step: int, margin: 
int, + self, + queries: List[str], + batch_size: int, + max_seq_length: int, + step: int, + margin: int, + dataloader_kwargs: Optional[Dict[str, Any]], ) -> torch.utils.data.DataLoader: """ Setup function for a infer data loader. Args: - model: a ``PunctuationCapitalizationModel`` instance for which data loader is created. - queries: lower cased text without punctuation - batch_size: batch size to use during inference - max_seq_length: length of segments into which queries are split. ``max_seq_length`` includes ``[CLS]`` and - ``[SEP]`` so every segment contains at most ``max_seq_length-2`` tokens from input a query. - step: number of tokens by which a segment is offset to a previous segment. Parameter ``step`` cannot be greater - than ``max_seq_length-2``. - margin: number of tokens near the edge of a segment which label probabilities are not used in final prediction - computation. + queries (:obj:`List[str]`): lower cased text without punctuation + batch_size (:obj:`int`): batch size to use during inference + max_seq_length (:obj:`int`): length of segments into which queries are split. ``max_seq_length`` includes + ``[CLS]`` and ``[SEP]`` so every segment contains at most ``max_seq_length-2`` tokens from input a + query. + step (:obj:`int`): number of tokens by which a segment is offset to a previous segment. Parameter ``step`` + cannot be greater than ``max_seq_length-2``. + margin (:obj:`int`): number of tokens near the edge of a segment which label probabilities are not used in + final prediction computation. Returns: - A pytorch DataLoader. 
+ :obj:`torch.utils.data.DataLoader`: inference data loader """ - if max_seq_length is None: - max_seq_length = self._cfg.dataset.max_seq_length - if step is None: - step = self._cfg.dataset.step - if margin is None: - margin = self._cfg.dataset.margin - + if dataloader_kwargs is None: + dataloader_kwargs = {} dataset = BertPunctuationCapitalizationInferDataset( tokenizer=self.tokenizer, queries=queries, max_seq_length=max_seq_length, step=step, margin=margin ) @@ -370,13 +852,12 @@ def _setup_infer_dataloader( collate_fn=dataset.collate_fn, batch_size=batch_size, shuffle=False, - num_workers=self._cfg.dataset.num_workers, - pin_memory=self._cfg.dataset.pin_memory, drop_last=False, + **dataloader_kwargs, ) @staticmethod - def _remove_margins(tensor, margin_size, keep_left, keep_right): + def _remove_margins(tensor: torch.Tensor, margin_size: int, keep_left: bool, keep_right: bool) -> torch.Tensor: tensor = tensor.detach().clone() if not keep_left: tensor = tensor[margin_size + 1 :] # remove left margin and CLS token @@ -393,14 +874,15 @@ def _transform_logit_to_prob_and_remove_margins_and_extract_word_probs( margin: int, is_first: Tuple[bool], is_last: Tuple[bool], - ) -> Tuple[List[np.ndarray], List[np.ndarray], List[int]]: + ) -> Tuple[List[ArrayLike], List[ArrayLike], List[int]]: """ Applies softmax to get punctuation and capitalization probabilities, applies ``subtokens_mask`` to extract probabilities for words from probabilities for tokens, removes ``margin`` probabilities near edges of a segment. Left margin of the first segment in a query and right margin of the last segment in a query are not removed. - Calculates new ``start_word_ids`` taking into the account the margins. If the left margin of a segment is removed - corresponding start word index is increased by number of words (number of nonzero values in corresponding - ``subtokens_mask``) in the margin. + Calculates new ``start_word_ids`` taking into the account the margins. 
If the left margin of a segment is + removed corresponding start word index is increased by number of words (number of nonzero values in + corresponding ``subtokens_mask``) in the margin. + Args: punct_logits: a float tensor of shape ``[batch_size, segment_length, number_of_punctuation_labels]`` capit_logits: a float tensor of shape ``[batch_size, segment_length, number_of_capitalization_labels]`` @@ -414,8 +896,8 @@ def _transform_logit_to_prob_and_remove_margins_and_extract_word_probs( ``[number_of_word_in_this_segment, number_of_punctuation_labels]``. Word punctuation probabilities for segments in the batch. b_capit_probs: list containing ``batch_size`` numpy arrays. The numpy arrays have shapes - ``[number_of_word_in_this_segment, number_of_capitalization_labels]``. Word capitalization probabilities for - segments in the batch. + ``[number_of_word_in_this_segment, number_of_capitalization_labels]``. Word capitalization + probabilities for segments in the batch. new_start_word_ids: indices of segment first words in a query after margin removal """ new_start_word_ids = list(start_word_ids) @@ -436,8 +918,8 @@ def _transform_logit_to_prob_and_remove_margins_and_extract_word_probs( @staticmethod def _move_acc_probs_to_token_preds( - pred: List[int], acc_prob: np.ndarray, number_of_probs_to_move: int - ) -> Tuple[List[int], np.ndarray]: + pred: List[int], acc_prob: ArrayLike, number_of_probs_to_move: int + ) -> Tuple[List[int], ArrayLike]: """ ``number_of_probs_to_move`` rows in the beginning are removed from ``acc_prob``. From every remove row the label with the largest probability is selected and appended to ``pred``. 
@@ -461,7 +943,7 @@ def _move_acc_probs_to_token_preds( return pred, acc_prob @staticmethod - def _update_accumulated_probabilities(acc_prob: np.ndarray, update: np.ndarray) -> np.ndarray: + def _update_accumulated_probabilities(acc_prob: ArrayLike, update: ArrayLike) -> ArrayLike: """ Args: acc_prob: numpy array of shape ``[A, L]`` @@ -472,7 +954,7 @@ def _update_accumulated_probabilities(acc_prob: np.ndarray, update: np.ndarray) acc_prob = np.concatenate([acc_prob * update[: acc_prob.shape[0]], update[acc_prob.shape[0] :]], axis=0) return acc_prob - def apply_punct_capit_predictions(self, query: str, punct_preds: List[int], capit_preds: List[int]) -> str: + def _apply_punct_capit_predictions(self, query: str, punct_preds: List[int], capit_preds: List[int]) -> str: """ Restores punctuation and capitalization in ``query``. Args: @@ -489,24 +971,25 @@ def apply_punct_capit_predictions(self, query: str, punct_preds: List[int], capi assert len(query) == len( capit_preds ), f"len(query)={len(query)} len(capit_preds)={len(capit_preds)}, query[:30]={query[:30]}" - punct_ids_to_labels = {v: k for k, v in self._cfg.punct_label_ids.items()} - capit_ids_to_labels = {v: k for k, v in self._cfg.capit_label_ids.items()} + punct_ids_to_labels = {v: k for k, v in self.punct_label_ids.items()} + capit_ids_to_labels = {v: k for k, v in self.capit_label_ids.items()} query_with_punct_and_capit = '' for j, word in enumerate(query): punct_label = punct_ids_to_labels[punct_preds[j]] capit_label = capit_ids_to_labels[capit_preds[j]] - if capit_label != self._cfg.dataset.pad_label: + if capit_label != self._cfg.common_dataset_parameters.pad_label: word = word.capitalize() query_with_punct_and_capit += word - if punct_label != self._cfg.dataset.pad_label: + if punct_label != self._cfg.common_dataset_parameters.pad_label: query_with_punct_and_capit += punct_label query_with_punct_and_capit += ' ' return query_with_punct_and_capit[:-1] - def get_labels(self, punct_preds: List[int], 
capit_preds: List[int]) -> str: + def _get_labels(self, punct_preds: List[int], capit_preds: List[int]) -> str: """ - Returns punctuation and capitalization labels in NeMo format (see https://docs.nvidia.com/deeplearning/nemo/ + Returns punctuation and capitalization labels in NeMo format for encoded punctuation ``punct_preds`` + and ``capit_preds`` labels (see https://docs.nvidia.com/deeplearning/nemo/ user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format). Args: punct_preds: ids of predicted punctuation labels @@ -517,8 +1000,8 @@ def get_labels(self, punct_preds: List[int], capit_preds: List[int]) -> str: assert len(capit_preds) == len( punct_preds ), f"len(capit_preds)={len(capit_preds)} len(punct_preds)={len(punct_preds)}" - punct_ids_to_labels = {v: k for k, v in self._cfg.punct_label_ids.items()} - capit_ids_to_labels = {v: k for k, v in self._cfg.capit_label_ids.items()} + punct_ids_to_labels = {v: k for k, v in self.punct_label_ids.items()} + capit_ids_to_labels = {v: k for k, v in self.capit_label_ids.items()} result = '' for capit_label, punct_label in zip(capit_preds, punct_preds): punct_label = punct_ids_to_labels[punct_label] @@ -534,41 +1017,49 @@ def add_punctuation_capitalization( step: int = 8, margin: int = 16, return_labels: bool = False, + dataloader_kwargs: Dict[str, Any] = None, ) -> List[str]: """ Adds punctuation and capitalization to the queries. Use this method for inference. Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments - which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization - including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift - between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near + which are processed by the model. 
Parameter ``max_seq_length`` is a length of a segment after tokenization + including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is a + shift between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near borders of segments which have only one side context. If segments overlap, probabilities of overlapping predictions are multiplied and then the label with corresponding to the maximum probability is selected. Args: - queries: lower cased text without punctuation - batch_size: batch size to use during inference - max_seq_length: maximum sequence length of segment after tokenization. - step: relative shift of consequent segments into which long queries are split. Long queries are split into - segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are - tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is - tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. - margin: number of subtokens in the beginning and the end of segments which are not used for prediction - computation. The first segment does not have left margin and the last segment does not have right - margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``, - ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments - ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], + queries (:obj:`List[str]`): lower cased text without punctuation. + batch_size (:obj:`List[str]`, `optional`): batch size to use during inference. If ``batch_size`` parameter + is not provided, then it will be equal to length of ``queries`` list. + max_seq_length (:obj:`int`, `optional`, defaults to :obj:`64`): maximum sequence length of a segment after + tokenization including :code:`[CLS]` and :code:`[SEP]` tokens. 
+ step (:obj:`int`, `optional`, defaults to :obj:`8`): relative shift of consequent segments into which long + queries are split. Long queries are split into segments which can overlap. Parameter ``step`` controls + such overlapping. Imagine that queries are tokenized into characters, ``max_seq_length=5``, and + ``step=2``. In such case, query ``"hello"`` is tokenized into segments + ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. + margin (:obj:`int`, `optional`, defaults to :obj:`16`): number of subtokens in the beginning and the end of + segments which are not used for prediction computation. The first segment does not have left margin and + the last segment does not have right margin. For example, if an input sequence is tokenized into + characters, ``max_seq_length=5``, ``step=1``, and ``margin=1``, then query ``"hello"`` will be + tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions computation, margins are removed. In the next list, subtokens which logits are not used for final predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*], ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``. - return_labels: whether to return labels in NeMo format (see https://docs.nvidia.com/deeplearning/nemo/ - user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format) instead of queries - with restored punctuation and capitalization. + return_labels (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to return labels in NeMo format + (see :ref:`nlp/punctuation_and_capitalization/NeMo Data Format`) instead of queries with restored + punctuation and capitalization. + dataloader_kwargs (:obj:`Dict[str, Any]`, `optional`): an optional dictionary with parameters of PyTorch + data loader. 
May include keys: ``'num_workers'``, ``'pin_memory'``, ``'worker_init_fn'``, + ``'prefetch_factor'``, ``'persistent_workers'``. Returns: - result: text with added capitalization and punctuation or punctuation and capitalization labels + :obj:`List[str]`: a list of queries with restored capitalization and punctuation if + ``return_labels=False``, else a list of punctuation and capitalization labels strings for all queries """ if len(queries) == 0: return [] @@ -579,7 +1070,9 @@ def add_punctuation_capitalization( mode = self.training try: self.eval() - infer_datalayer = self._setup_infer_dataloader(queries, batch_size, max_seq_length, step, margin) + infer_datalayer = self._setup_infer_dataloader( + queries, batch_size, max_seq_length, step, margin, dataloader_kwargs + ) # Predicted labels for queries. List of labels for every query all_punct_preds: List[List[int]] = [[] for _ in queries] all_capit_preds: List[List[int]] = [[] for _ in queries] @@ -591,8 +1084,8 @@ def add_punctuation_capitalization( # input query. When all segments with a word are processed, a label with the highest probability # (or product of probabilities) is chosen and appended to an appropriate list in `all_preds`. After adding # prediction to `all_preds`, probabilities for a word are removed from `acc_probs`. 
- acc_punct_probs: List[Optional[np.ndarray]] = [None for _ in queries] - acc_capit_probs: List[Optional[np.ndarray]] = [None for _ in queries] + acc_punct_probs: List[Optional[ArrayLike]] = [None for _ in queries] + acc_capit_probs: List[Optional[ArrayLike]] = [None for _ in queries] d = self.device for batch_i, batch in tqdm( enumerate(infer_datalayer), total=ceil(len(infer_datalayer.dataset) / batch_size), unit="batch" @@ -625,9 +1118,9 @@ def add_punctuation_capitalization( all_preds[q_i], acc_probs[q_i] = self._move_acc_probs_to_token_preds(pred, prob, len(prob)) for i, query in enumerate(queries): result.append( - self.get_labels(all_punct_preds[i], all_capit_preds[i]) + self._get_labels(all_punct_preds[i], all_capit_preds[i]) if return_labels - else self.apply_punct_capit_predictions(query, all_punct_preds[i], all_capit_preds[i]) + else self._apply_punct_capit_predictions(query, all_punct_preds[i], all_capit_preds[i]) ) finally: # set mode back to its original value @@ -635,32 +1128,34 @@ def add_punctuation_capitalization( return result @classmethod - def list_available_models(cls) -> Optional[Dict[str, str]]: + def list_available_models(cls) -> List[PretrainedModelInfo]: """ - This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud. + This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud. Returns: - List of available pre-trained models. + :obj:`List[PretrainedModelInfo]`: a list of available pre-trained models. 
""" - result = [] - result.append( + result = [ PretrainedModelInfo( pretrained_model_name="punctuation_en_bert", - location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_bert/versions/1.0.0rc1/files/punctuation_en_bert.nemo", - description="The model was trained with NeMo BERT base uncased checkpoint on a subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.", - ) - ) - result.append( + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_bert/versions/1.0.0rc1/" + "files/punctuation_en_bert.nemo", + description="The model was trained with NeMo BERT base uncased checkpoint on a subset of data from " + "the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.", + ), PretrainedModelInfo( pretrained_model_name="punctuation_en_distilbert", - location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_distilbert/versions/1.0.0rc1/files/punctuation_en_distilbert.nemo", - description="The model was trained with DiltilBERT base uncased checkpoint from HuggingFace on a subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, Fisher transcripts.", - ) - ) + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/punctuation_en_distilbert/versions/" + "1.0.0rc1/files/punctuation_en_distilbert.nemo", + description="The model was trained with DiltilBERT base uncased checkpoint from HuggingFace on a " + "subset of data from the following sources: Tatoeba sentences, books from Project Gutenberg, " + "Fisher transcripts.", + ), + ] return result @property - def input_module(self): + def input_module(self) -> Any: return self.bert_model @property diff --git a/nemo/collections/nlp/modules/common/tokenizer_utils.py b/nemo/collections/nlp/modules/common/tokenizer_utils.py --- a/nemo/collections/nlp/modules/common/tokenizer_utils.py +++ b/nemo/collections/nlp/modules/common/tokenizer_utils.py @@ -82,12 
+82,14 @@ def get_tokenizer( Args: tokenizer_name: sentencepiece or pretrained model from the hugging face list, for example: bert-base-cased - To see the list of all HuggingFace pretrained models, use: nemo_nlp.modules.common.get_huggingface_pretrained_lm_models_list() + To see the list of all HuggingFace pretrained models, use: + nemo_nlp.modules.common.get_huggingface_pretrained_lm_models_list() tokenizer_model: tokenizer model file of sentencepiece or youtokentome special_tokens: dict of special tokens vocab_file: path to vocab file use_fast: (only for HuggingFace AutoTokenizer) set to True to use fast HuggingFace tokenizer - bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation procedure of BPE to help + bpe_dropout: (only supported by YTTM tokenizer) BPE dropout tries to corrupt the standard segmentation + procedure of BPE to help model better learn word compositionality and become robust to segmentation errors. It has emperically been shown to improve inference time BLEU scores. """ @@ -120,7 +122,8 @@ def get_tokenizer( return CharTokenizer(vocab_file=vocab_file, **special_tokens_dict) logging.info( - f"Getting HuggingFace AutoTokenizer with pretrained_model_name: {tokenizer_name}, vocab_file: {vocab_file}, special_tokens_dict: {special_tokens_dict}, and use_fast: {use_fast}" + f"Getting HuggingFace AutoTokenizer with pretrained_model_name: {tokenizer_name}, vocab_file: {vocab_file}, " + f"special_tokens_dict: {special_tokens_dict}, and use_fast: {use_fast}" ) return AutoTokenizer( pretrained_model_name=tokenizer_name, diff --git a/nemo/core/connectors/save_restore_connector.py b/nemo/core/connectors/save_restore_connector.py --- a/nemo/core/connectors/save_restore_connector.py +++ b/nemo/core/connectors/save_restore_connector.py @@ -47,7 +47,7 @@ def save_to(self, model, save_path: str): Args: model: ModelPT object to be saved. 
save_path: Path to .nemo file where model instance should be saved - """ + """ if is_global_rank_zero(): with tempfile.TemporaryDirectory() as tmpdir: @@ -74,27 +74,27 @@ def restore_from( trainer: Trainer = None, ): """ - Restores model instance (weights and configuration) into .nemo file - - Args: - restore_path: path to .nemo file from which model should be instantiated - override_config_path: path to a yaml config that will override the internal - config file or an OmegaConf / DictConfig object representing the model config. - map_location: Optional torch.device() to map the instantiated model to a device. - By default (None), it will select a GPU if available, falling back to CPU otherwise. - strict: Passed to load_state_dict. By default True - return_config: If set to true, will return just the underlying config of the restored - model as an OmegaConf DictConfig object without instantiating the model. - - Example: - ``` - model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo') - assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel) - ``` - - Returns: - An instance of type cls or its underlying config (if return_config is set). - """ + Restores model instance (weights and configuration) into .nemo file + + Args: + restore_path: path to .nemo file from which model should be instantiated + override_config_path: path to a yaml config that will override the internal + config file or an OmegaConf / DictConfig object representing the model config. + map_location: Optional torch.device() to map the instantiated model to a device. + By default (None), it will select a GPU if available, falling back to CPU otherwise. + strict: Passed to load_state_dict. By default True + return_config: If set to true, will return just the underlying config of the restored + model as an OmegaConf DictConfig object without instantiating the model. 
+ + Example: + ``` + model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo') + assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel) + ``` + + Returns: + An instance of type cls or its underlying config (if return_config is set). + """ # Get path where the command is executed - the artifacts will be "retrieved" there # (original .nemo behavior) cwd = os.getcwd() @@ -228,31 +228,33 @@ def extract_state_dict_from(self, restore_path: str, save_dir: str, split_by_mod return state_dict def register_artifact(self, model, config_path: str, src: str, verify_src_exists: bool = True): - """ Register model artifacts with this function. These artifacts (files) will be included inside .nemo file - when model.save_to("mymodel.nemo") is called. - - How it works: - 1. It always returns existing absolute path which can be used during Model constructor call - EXCEPTION: src is None or "" in which case nothing will be done and src will be returned - 2. It will add (config_path, model_utils.ArtifactItem()) pair to self.artifacts - - If "src" is local existing path, then it will be returned in absolute path form. - elif "src" starts with "nemo_file:unique_artifact_name": - .nemo will be untarred to a temporary folder location and an actual existing path will be returned - else an error will be raised. - - WARNING: use .register_artifact calls in your models' constructors. - The returned path is not guaranteed to exist after you have exited your model's constuctor. - - Args: - model: ModelPT object to register artifact for. - config_path (str): Artifact key. Usually corresponds to the model config. - src (str): Path to artifact. - verify_src_exists (bool): If set to False, then the artifact is optional and register_artifact will return None even if - src is not found. Defaults to True. 
- - Returns: - str: If src is not None or empty it always returns absolute path which is guaranteed to exists during model instnce life + """ + Register model artifacts with this function. These artifacts (files) will be included inside .nemo file + when model.save_to("mymodel.nemo") is called. + + How it works: + 1. It always returns existing absolute path which can be used during Model constructor call + EXCEPTION: src is None or "" in which case nothing will be done and src will be returned + 2. It will add (config_path, model_utils.ArtifactItem()) pair to self.artifacts + + If "src" is local existing path, then it will be returned in absolute path form. + elif "src" starts with "nemo_file:unique_artifact_name": + .nemo will be untarred to a temporary folder location and an actual existing path will be returned + else an error will be raised. + + WARNING: use .register_artifact calls in your models' constructors. + The returned path is not guaranteed to exist after you have exited your model's constructor. + + Args: + model: ModelPT object to register artifact for. + config_path (str): Artifact key. Usually corresponds to the model config. + src (str): Path to artifact. + verify_src_exists (bool): If set to False, then the artifact is optional and register_artifact will return + None even if src is not found. Defaults to True. + + Returns: + str: If src is not None or empty it always returns absolute path which is guaranteed to exists during model + instance life """ app_state = AppState() diff --git a/nemo/utils/config_utils.py b/nemo/utils/config_utils.py --- a/nemo/utils/config_utils.py +++ b/nemo/utils/config_utils.py @@ -37,7 +37,7 @@ def update_model_config( Assumes the `update_cfg` is a DictConfig (either generated manually, via hydra or instantiated via yaml/model.cfg). This update_cfg is then used to override the default values preset inside the ModelPT config class. 
- If `drop_missing_subconfigs` is set, the certain sub-configs of the ModelPT config class will be removed, iff + If `drop_missing_subconfigs` is set, the certain sub-configs of the ModelPT config class will be removed, if they are not found in the mirrored `update_cfg`. The following sub-configs are subject to potential removal: - `train_ds` - `validation_ds` </patch>
diff --git a/tests/collections/nlp/test_pretrained_models_performance.py b/tests/collections/nlp/test_pretrained_models_performance.py --- a/tests/collections/nlp/test_pretrained_models_performance.py +++ b/tests/collections/nlp/test_pretrained_models_performance.py @@ -48,6 +48,29 @@ def get_metrics(data_dir, model): return metrics +def get_metrics_new_format(data_dir, model): + trainer = pl.Trainer(gpus=[0]) + + model.set_trainer(trainer) + + test_ds = OmegaConf.create( + { + 'use_tarred_dataset': False, + 'ds_item': data_dir, + 'text_file': 'text_dev.txt', + 'labels_file': 'labels_dev.txt', + 'shuffle': False, + 'num_samples': -1, + 'tokens_in_batch': 512, + 'use_cache': False, + } + ) + model.setup_test_data(test_data_config=test_ds) + metrics = trainer.test(model)[0] + + return metrics + + def data_exists(data_dir): return os.path.exists(data_dir) @@ -62,15 +85,15 @@ class TestPretrainedModelPerformance: def test_punct_capit_with_bert(self): data_dir = '/home/TestData/nlp/token_classification_punctuation/fisher' model = models.PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert") - metrics = get_metrics(data_dir, model) + metrics = get_metrics_new_format(data_dir, model) - assert abs(metrics['punct_precision'] - 52.3024) < 0.001 - assert abs(metrics['punct_recall'] - 58.9220) < 0.001 - assert abs(metrics['punct_f1'] - 53.2976) < 0.001 - assert abs(metrics['capit_precision'] - 87.0707) < 0.001 - assert abs(metrics['capit_recall'] - 87.0707) < 0.001 - assert abs(metrics['capit_f1'] - 87.0707) < 0.001 - assert int(model.punct_class_report.total_examples) == 128 + assert abs(metrics['test_punct_precision'] - 52.3024) < 0.001 + assert abs(metrics['test_punct_recall'] - 58.9220) < 0.001 + assert abs(metrics['test_punct_f1'] - 53.2976) < 0.001 + assert abs(metrics['test_capit_precision'] - 87.0707) < 0.001 + assert abs(metrics['test_capit_recall'] - 87.0707) < 0.001 + assert abs(metrics['test_capit_f1'] - 87.0707) < 0.001 + assert 
int(model.metrics['test']['punct_class_report'][0].total_examples) == 128 preds_512 = model.add_punctuation_capitalization(['what can i do for you today'], max_seq_length=512)[0] assert preds_512 == 'What can I do for you today?' @@ -94,12 +117,12 @@ def test_punct_capit_with_bert(self): def test_punct_capit_with_distilbert(self): data_dir = '/home/TestData/nlp/token_classification_punctuation/fisher' model = models.PunctuationCapitalizationModel.from_pretrained("punctuation_en_distilbert") - metrics = get_metrics(data_dir, model) + metrics = get_metrics_new_format(data_dir, model) - assert abs(metrics['punct_precision'] - 53.0826) < 0.001 - assert abs(metrics['punct_recall'] - 56.2905) < 0.001 - assert abs(metrics['punct_f1'] - 52.4225) < 0.001 - assert int(model.punct_class_report.total_examples) == 128 + assert abs(metrics['test_punct_precision'] - 53.0826) < 0.001 + assert abs(metrics['test_punct_recall'] - 56.2905) < 0.001 + assert abs(metrics['test_punct_f1'] - 52.4225) < 0.001 + assert int(model.metrics['test']['punct_class_report'][0].total_examples) == 128 @pytest.mark.with_downloads() @pytest.mark.unit
1.0
NVIDIA__NeMo-6060
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spectrogram Enhancer doesn't generalize to spectrogram lengths unseen during training **Describe the bug** If one trains TTS spectrogram enhancer (#5565) on short spectrograms, at inference time it doesn't generalize to longer ones. A patch in the beginning gets enhanced but further frames do not. Example (before, after): ![before](https://user-images.githubusercontent.com/8864149/218520151-230501b1-4e9d-4307-b51e-526440b915c8.png) ![after](https://user-images.githubusercontent.com/8864149/218520199-22187a4b-9cf8-4ac9-b3ce-07c320d3d3b6.png) **Steps/Code to reproduce bug** 1. Train a spectrogram enhancer 2. Apply it to a spectrogram that's longer than anything from the training set 3. Only a patch in the beginning gets enhanced **Expected behavior** The whole spectrogram should have got additional details, not just the first patch </issue> <code> [start of README.rst] 1 2 |status| |documentation| |license| |lgtm_grade| |lgtm_alerts| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 .. |documentation| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 9 :alt: Documentation 10 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 11 12 .. |license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 13 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 14 :alt: NeMo core license and license for collections in this repo 15 16 .. |lgtm_grade| image:: https://img.shields.io/lgtm/grade/python/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 17 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/context:python 18 :alt: Language grade: Python 19 20 .. 
|lgtm_alerts| image:: https://img.shields.io/lgtm/alerts/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 21 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/alerts/ 22 :alt: Total alerts 23 24 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 25 :target: https://github.com/psf/black 26 :alt: Code style: black 27 28 .. _main-readme: 29 30 **NVIDIA NeMo** 31 =============== 32 33 Introduction 34 ------------ 35 36 NVIDIA NeMo is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), 37 text-to-speech synthesis (TTS), large language models (LLMs), and 38 natural language processing (NLP). 39 The primary objective of NeMo is to help researchers from industry and academia to reuse prior work (code and pretrained models) 40 and make it easier to create new `conversational AI models <https://developer.nvidia.com/conversational-ai#started>`_. 41 42 All NeMo models are trained with `Lightning <https://github.com/Lightning-AI/lightning>`_ and 43 training is automatically scalable to 1000s of GPUs. 44 Additionally, NeMo Megatron LLM models can be trained up to 1 trillion parameters using tensor and pipeline model parallelism. 45 NeMo models can be optimized for inference and deployed for production use-cases with `NVIDIA Riva <https://developer.nvidia.com/riva>`_. 46 47 Getting started with NeMo is simple. 48 State of the Art pretrained NeMo models are freely available on `HuggingFace Hub <https://huggingface.co/models?library=nemo&sort=downloads&search=nvidia>`_ and 49 `NVIDIA NGC <https://catalog.ngc.nvidia.com/models?query=nemo&orderBy=weightPopularDESC>`_. 50 These models can be used to transcribe audio, synthesize speech, or translate text in a just a few lines of code. 51 52 We have have extensive `tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_ that 53 can all be run on `Google Colab <https://colab.research.google.com>`_. 
54 55 For advanced users that want to train NeMo models from scratch or finetune existing NeMo models 56 we have a full suite of `example scripts <https://github.com/NVIDIA/NeMo/tree/update_readme_into/examples>`_ that support multi-GPU/multi-node training. 57 58 Also see our `introductory video <https://www.youtube.com/embed/wBgpMf_KQVw>`_ for a high level overview of NeMo. 59 60 Key Features 61 ------------ 62 63 * Speech processing 64 * `HuggingFace Space for Audio Transcription (File, Microphone and YouTube) <https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id>`_ 65 * `Automatic Speech Recognition (ASR) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html>`_ 66 * Supported models: Jasper, QuartzNet, CitriNet, Conformer-CTC, Conformer-Transducer, Squeezeformer-CTC, Squeezeformer-Transducer, ContextNet, LSTM-Transducer (RNNT), LSTM-CTC, FastConformer-CTC, FastConformer-Transducer... 67 * Supports CTC and Transducer/RNNT losses/decoders 68 * NeMo Original `Multi-blank Transducers <https://arxiv.org/abs/2211.03541>`_ 69 * Beam Search decoding 70 * `Language Modelling for ASR <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html>`_: N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer 71 * Streaming and Buffered ASR (CTC/Transducer) - `Chunked Inference Examples <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference>`_ 72 * `Support of long audios for Conformer with memory efficient local attention <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio>`_ 73 * `Speech Classification, Speech Command Recognition and Language Identification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html>`_: MatchboxNet (Command Recognition), AmberNet (LangID) 74 * `Voice activity Detection (VAD) 
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad>`_: MarbleNet 75 * ASR with VAD Inference - `Example <https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad>`_ 76 * `Speaker Recognition <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html>`_: TitaNet, ECAPA_TDNN, SpeakerNet 77 * `Speaker Diarization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html>`_ 78 * Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet 79 * Neural Diarizer: MSDD (Multi-scale Diarization Decoder) 80 * `Speech Intent Detection and Slot Filling <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html>`_: Conformer-Transformer 81 * `Pretrained models on different languages. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_: English, Spanish, German, Russian, Chinese, French, Italian, Polish, ... 82 * `NGC collection of pre-trained speech processing models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr>`_ 83 * Natural Language Processing 84 * `NeMo Megatron pre-training of Large Language Models <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/nemo_megatron/intro.html>`_ 85 * `Neural Machine Translation (NMT) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/machine_translation/machine_translation.html>`_ 86 * `Punctuation and Capitalization <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html>`_ 87 * `Token classification (named entity recognition) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/token_classification.html>`_ 88 * `Text classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_classification.html>`_ 89 * `Joint Intent and Slot Classification <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/joint_intent_slot.html>`_ 90 * `Question answering <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/question_answering.html>`_ 91 * `GLUE benchmark <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/glue_benchmark.html>`_ 92 * `Information retrieval <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/information_retrieval.html>`_ 93 * `Entity Linking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/entity_linking.html>`_ 94 * `Dialogue State Tracking <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/sgd_qa.html>`_ 95 * `Prompt Learning <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/nemo_megatron/prompt_learning.html>`_ 96 * `NGC collection of pre-trained NLP models. 
<https://ngc.nvidia.com/catalog/collections/nvidia:nemo_nlp>`_ 97 * `Synthetic Tabular Data Generation <https://developer.nvidia.com/blog/generating-synthetic-data-with-transformers-a-solution-for-enterprise-data-challenges/>`_ 98 * `Speech synthesis (TTS) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#>`_ 99 * Spectrogram generation: Tacotron2, GlowTTS, TalkNet, FastPitch, FastSpeech2, Mixer-TTS, Mixer-TTS-X 100 * Vocoders: WaveGlow, SqueezeWave, UniGlow, MelGAN, HiFiGAN, UnivNet 101 * End-to-end speech generation: FastPitch_HifiGan_E2E, FastSpeech2_HifiGan_E2E 102 * `NGC collection of pre-trained TTS models. <https://ngc.nvidia.com/catalog/collections/nvidia:nemo_tts>`_ 103 * `Tools <https://github.com/NVIDIA/NeMo/tree/stable/tools>`_ 104 * `Text Processing (text normalization and inverse text normalization) <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html>`_ 105 * `CTC-Segmentation tool <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/ctc_segmentation.html>`_ 106 * `Speech Data Explorer <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/speech_data_explorer.html>`_: a dash-based tool for interactive exploration of ASR/TTS datasets 107 108 109 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 110 111 Requirements 112 ------------ 113 114 1) Python 3.8 or above 115 2) Pytorch 1.10.0 or above 116 3) NVIDIA GPU for training 117 118 Documentation 119 ------------- 120 121 .. |main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 122 :alt: Documentation Status 123 :scale: 100% 124 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 125 126 .. 
|stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 127 :alt: Documentation Status 128 :scale: 100% 129 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 130 131 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 132 | Version | Status | Description | 133 +=========+=============+==========================================================================================================================================+ 134 | Latest | |main| | `Documentation of the latest (i.e. main) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/>`_ | 135 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 136 | Stable | |stable| | `Documentation of the stable (i.e. most recent release) branch. <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/>`_ | 137 +---------+-------------+------------------------------------------------------------------------------------------------------------------------------------------+ 138 139 Tutorials 140 --------- 141 A great way to start with NeMo is by checking `one of our tutorials <https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/starthere/tutorials.html>`_. 142 143 Getting help with NeMo 144 ---------------------- 145 FAQ can be found on NeMo's `Discussions board <https://github.com/NVIDIA/NeMo/discussions>`_. You are welcome to ask questions or start discussions there. 146 147 148 Installation 149 ------------ 150 151 Conda 152 ~~~~~ 153 154 We recommend installing NeMo in a fresh Conda environment. 155 156 .. code-block:: bash 157 158 conda create --name nemo python==3.8 159 conda activate nemo 160 161 Install PyTorch using their `configurator <https://pytorch.org/get-started/locally/>`_. 
162 163 .. code-block:: bash 164 165 conda install pytorch torchvision torchaudio cudatoolkit=11.3 -c pytorch 166 167 .. note:: 168 169 The command used to install PyTorch may depend on your system. 170 171 Pip 172 ~~~ 173 Use this installation mode if you want the latest released version. 174 175 .. code-block:: bash 176 177 apt-get update && apt-get install -y libsndfile1 ffmpeg 178 pip install Cython 179 pip install nemo_toolkit['all'] 180 181 .. note:: 182 183 Depending on the shell used, you may need to use ``"nemo_toolkit[all]"`` instead in the above command. 184 185 Pip from source 186 ~~~~~~~~~~~~~~~ 187 Use this installation mode if you want the a version from particular GitHub branch (e.g main). 188 189 .. code-block:: bash 190 191 apt-get update && apt-get install -y libsndfile1 ffmpeg 192 pip install Cython 193 python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all] 194 195 196 From source 197 ~~~~~~~~~~~ 198 Use this installation mode if you are contributing to NeMo. 199 200 .. code-block:: bash 201 202 apt-get update && apt-get install -y libsndfile1 ffmpeg 203 git clone https://github.com/NVIDIA/NeMo 204 cd NeMo 205 ./reinstall.sh 206 207 .. note:: 208 209 If you only want the toolkit without additional conda-based dependencies, you may replace ``reinstall.sh`` 210 with ``pip install -e .`` when your PWD is the root of the NeMo repository. 211 212 RNNT 213 ~~~~ 214 Note that RNNT requires numba to be installed from conda. 215 216 .. code-block:: bash 217 218 conda remove numba 219 pip uninstall numba 220 conda install -c conda-forge numba 221 222 NeMo Megatron 223 ~~~~~~~~~~~~~ 224 NeMo Megatron training requires NVIDIA Apex to be installed. 225 Install it manually if not using the NVIDIA PyTorch container. 226 227 .. 
code-block:: bash 228 229 git clone https://github.com/ericharper/apex.git 230 cd apex 231 git checkout nm_v1.15.0 232 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_layer_norm" --global-option="--distributed_adam" --global-option="--deprecated_fused_adam" ./ 233 234 Transformer Engine 235 ~~~~~~~~~~~~~~~~~~ 236 NeMo Megatron GPT has been integrated with `NVIDIA Transformer Engine <https://github.com/NVIDIA/TransformerEngine>`_ 237 Transformer Engine enables FP8 training on NVIDIA Hopper GPUs. 238 `Install <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/installation.html>`_ it manually if not using the NVIDIA PyTorch container. 239 240 .. note:: 241 242 Transformer Engine requires PyTorch to be built with CUDA 11.8. 243 244 NeMo Text Processing 245 ~~~~~~~~~~~~~~~~~~~~ 246 NeMo Text Processing, specifically (Inverse) Text Normalization, requires `Pynini <https://pypi.org/project/pynini/>`_ to be installed. 247 248 .. code-block:: bash 249 250 bash NeMo/nemo_text_processing/install_pynini.sh 251 252 Docker containers: 253 ~~~~~~~~~~~~~~~~~~ 254 We release NeMo containers alongside NeMo releases. For example, NeMo ``r1.14.0`` comes with container ``nemo:22.11``, you may find more details about released containers in `releases page <https://github.com/NVIDIA/NeMo/releases>`_. 255 256 To use built container, please run 257 258 .. code-block:: bash 259 260 docker pull nvcr.io/nvidia/nemo:22.11 261 262 To build a nemo container with Dockerfile from a branch, please run 263 264 .. code-block:: bash 265 266 DOCKER_BUILDKIT=1 docker build -f Dockerfile -t nemo:latest . 267 268 269 If you chose to work with main branch, we recommend using NVIDIA's PyTorch container version 23.01-py3 and then installing from GitHub. 270 271 .. 
code-block:: bash 272 273 docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 274 -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 275 stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:23.01-py3 276 277 Examples 278 -------- 279 280 Many examples can be found under `"Examples" <https://github.com/NVIDIA/NeMo/tree/stable/examples>`_ folder. 281 282 283 Contributing 284 ------------ 285 286 We welcome community contributions! Please refer to the `CONTRIBUTING.md <https://github.com/NVIDIA/NeMo/blob/stable/CONTRIBUTING.md>`_ CONTRIBUTING.md for the process. 287 288 Publications 289 ------------ 290 291 We provide an ever growing list of publications that utilize the NeMo framework. Please refer to `PUBLICATIONS.md <https://github.com/NVIDIA/NeMo/tree/stable/PUBLICATIONS.md>`_. We welcome the addition of your own articles to this list ! 292 293 Citation 294 -------- 295 296 .. code-block:: bash 297 298 @article{kuchaiev2019nemo, 299 title={Nemo: a toolkit for building ai applications using neural modules}, 300 author={Kuchaiev, Oleksii and Li, Jason and Nguyen, Huyen and Hrinchuk, Oleksii and Leary, Ryan and Ginsburg, Boris and Kriman, Samuel and Beliaev, Stanislav and Lavrukhin, Vitaly and Cook, Jack and others}, 301 journal={arXiv preprint arXiv:1909.09577}, 302 year={2019} 303 } 304 305 License 306 ------- 307 NeMo is under `Apache 2.0 license <https://github.com/NVIDIA/NeMo/blob/stable/LICENSE>`_. 308 [end of README.rst] [start of nemo/collections/tts/modules/spectrogram_enhancer.py] 1 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # MIT License 16 # 17 # Copyright (c) 2020 Phil Wang 18 # 19 # Permission is hereby granted, free of charge, to any person obtaining a copy 20 # of this software and associated documentation files (the "Software"), to deal 21 # in the Software without restriction, including without limitation the rights 22 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 23 # copies of the Software, and to permit persons to whom the Software is 24 # furnished to do so, subject to the following conditions: 25 # 26 # The above copyright notice and this permission notice shall be included in all 27 # copies or substantial portions of the Software. 28 # 29 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 30 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 31 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 32 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 33 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 34 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 35 # SOFTWARE. 
36 37 # The following is largely based on code from https://github.com/lucidrains/stylegan2-pytorch 38 39 import math 40 from functools import partial 41 from math import log2 42 from typing import List 43 44 import torch 45 import torch.nn.functional as F 46 from einops import rearrange 47 from kornia.filters import filter2d 48 49 from nemo.collections.tts.helpers.helpers import mask_sequence_tensor 50 51 52 class Blur(torch.nn.Module): 53 def __init__(self): 54 super().__init__() 55 f = torch.Tensor([1, 2, 1]) 56 self.register_buffer("f", f) 57 58 def forward(self, x): 59 f = self.f 60 f = f[None, None, :] * f[None, :, None] 61 return filter2d(x, f, normalized=True) 62 63 64 class EqualLinear(torch.nn.Module): 65 def __init__(self, in_dim, out_dim, lr_mul=1, bias=True): 66 super().__init__() 67 self.weight = torch.nn.Parameter(torch.randn(out_dim, in_dim)) 68 if bias: 69 self.bias = torch.nn.Parameter(torch.zeros(out_dim)) 70 71 self.lr_mul = lr_mul 72 73 def forward(self, input): 74 return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul) 75 76 77 class StyleMapping(torch.nn.Module): 78 def __init__(self, emb, depth, lr_mul=0.1): 79 super().__init__() 80 81 layers = [] 82 for _ in range(depth): 83 layers.extend([EqualLinear(emb, emb, lr_mul), torch.nn.LeakyReLU(0.2, inplace=True)]) 84 85 self.net = torch.nn.Sequential(*layers) 86 87 def forward(self, x): 88 x = F.normalize(x, dim=1) 89 return self.net(x) 90 91 92 class RGBBlock(torch.nn.Module): 93 def __init__(self, latent_dim, input_channel, upsample, channels=3): 94 super().__init__() 95 self.input_channel = input_channel 96 self.to_style = torch.nn.Linear(latent_dim, input_channel) 97 98 out_filters = channels 99 self.conv = Conv2DModulated(input_channel, out_filters, 1, demod=False) 100 101 self.upsample = ( 102 torch.nn.Sequential(torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), Blur(),) 103 if upsample 104 else None 105 ) 106 107 def forward(self, x, 
prev_rgb, istyle): 108 style = self.to_style(istyle) 109 x = self.conv(x, style) 110 111 if prev_rgb is not None: 112 x = x + prev_rgb 113 114 if self.upsample is not None: 115 x = self.upsample(x) 116 117 return x 118 119 120 class Conv2DModulated(torch.nn.Module): 121 """ 122 Modulated convolution. 123 For details refer to [1] 124 [1] Karras et. al. - Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958) 125 """ 126 127 def __init__( 128 self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps=1e-8, **kwargs, 129 ): 130 super().__init__() 131 self.filters = out_chan 132 self.demod = demod 133 self.kernel = kernel 134 self.stride = stride 135 self.dilation = dilation 136 self.weight = torch.nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel))) 137 self.eps = eps 138 torch.nn.init.kaiming_normal_(self.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 139 140 def _get_same_padding(self, size, kernel, dilation, stride): 141 return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2 142 143 def forward(self, x, y): 144 b, c, h, w = x.shape 145 146 w1 = y[:, None, :, None, None] 147 w2 = self.weight[None, :, :, :, :] 148 weights = w2 * (w1 + 1) 149 150 if self.demod: 151 d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) 152 weights = weights * d 153 154 x = x.reshape(1, -1, h, w) 155 156 _, _, *ws = weights.shape 157 weights = weights.reshape(b * self.filters, *ws) 158 159 padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride) 160 x = F.conv2d(x, weights, padding=padding, groups=b) 161 162 x = x.reshape(-1, self.filters, h, w) 163 return x 164 165 166 class GeneratorBlock(torch.nn.Module): 167 def __init__( 168 self, latent_dim, input_channels, filters, upsample=True, upsample_rgb=True, channels=1, 169 ): 170 super().__init__() 171 self.upsample = torch.nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) if upsample else None 172 173 
self.to_style1 = torch.nn.Linear(latent_dim, input_channels) 174 self.to_noise1 = torch.nn.Linear(1, filters) 175 self.conv1 = Conv2DModulated(input_channels, filters, 3) 176 177 self.to_style2 = torch.nn.Linear(latent_dim, filters) 178 self.to_noise2 = torch.nn.Linear(1, filters) 179 self.conv2 = Conv2DModulated(filters, filters, 3) 180 181 self.activation = torch.nn.LeakyReLU(0.2, inplace=True) 182 self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, channels) 183 184 def forward(self, x, prev_rgb, istyle, inoise): 185 if self.upsample is not None: 186 x = self.upsample(x) 187 188 inoise = inoise[:, : x.shape[2], : x.shape[3], :] 189 noise1 = self.to_noise1(inoise).permute((0, 3, 1, 2)) 190 noise2 = self.to_noise2(inoise).permute((0, 3, 1, 2)) 191 192 style1 = self.to_style1(istyle) 193 x = self.conv1(x, style1) 194 x = self.activation(x + noise1) 195 196 style2 = self.to_style2(istyle) 197 x = self.conv2(x, style2) 198 x = self.activation(x + noise2) 199 200 rgb = self.to_rgb(x, prev_rgb, istyle) 201 return x, rgb 202 203 204 class DiscriminatorBlock(torch.nn.Module): 205 def __init__(self, input_channels, filters, downsample=True): 206 super().__init__() 207 self.conv_res = torch.nn.Conv2d(input_channels, filters, 1, stride=(2 if downsample else 1)) 208 209 self.net = torch.nn.Sequential( 210 torch.nn.Conv2d(input_channels, filters, 3, padding=1), 211 torch.nn.LeakyReLU(0.2, inplace=True), 212 torch.nn.Conv2d(filters, filters, 3, padding=1), 213 torch.nn.LeakyReLU(0.2, inplace=True), 214 ) 215 216 self.downsample = ( 217 torch.nn.Sequential(Blur(), torch.nn.Conv2d(filters, filters, 3, padding=1, stride=2)) 218 if downsample 219 else None 220 ) 221 222 def forward(self, x): 223 res = self.conv_res(x) 224 x = self.net(x) 225 if self.downsample is not None: 226 x = self.downsample(x) 227 x = (x + res) * (1 / math.sqrt(2)) 228 return x 229 230 231 class Generator(torch.nn.Module): 232 def __init__( 233 self, 234 n_bands, 235 latent_dim, 236 style_depth, 237 
network_capacity=16, 238 channels=1, 239 fmap_max=512, 240 max_spectrogram_length=2000, 241 ): 242 super().__init__() 243 self.image_size = n_bands 244 self.latent_dim = latent_dim 245 self.num_layers = int(log2(n_bands) - 1) 246 self.style_depth = style_depth 247 248 self.style_mapping = StyleMapping(self.latent_dim, self.style_depth, lr_mul=0.1) 249 250 filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1] 251 252 set_fmap_max = partial(min, fmap_max) 253 filters = list(map(set_fmap_max, filters)) 254 init_channels = filters[0] 255 filters = [init_channels, *filters] 256 257 in_out_pairs = zip(filters[:-1], filters[1:]) 258 259 self.initial_conv = torch.nn.Conv2d(filters[0], filters[0], 3, padding=1) 260 self.blocks = torch.nn.ModuleList([]) 261 262 for ind, (in_chan, out_chan) in enumerate(in_out_pairs): 263 not_first = ind != 0 264 not_last = ind != (self.num_layers - 1) 265 266 block = GeneratorBlock( 267 latent_dim, in_chan, out_chan, upsample=not_first, upsample_rgb=not_last, channels=channels, 268 ) 269 self.blocks.append(block) 270 271 for m in self.modules(): 272 if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)): 273 torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 274 for block in self.blocks: 275 torch.nn.init.zeros_(block.to_noise1.weight) 276 torch.nn.init.zeros_(block.to_noise1.bias) 277 torch.nn.init.zeros_(block.to_noise2.weight) 278 torch.nn.init.zeros_(block.to_noise2.bias) 279 280 initial_block_size = n_bands // self.upsample_factor, math.ceil(max_spectrogram_length / self.upsample_factor) 281 self.initial_block = torch.nn.Parameter( 282 torch.randn((1, init_channels, *initial_block_size)), requires_grad=False 283 ) 284 285 def add_scaled_condition(self, target: torch.Tensor, condition: torch.Tensor, condition_lengths: torch.Tensor): 286 *_, target_height, _ = target.shape 287 *_, height, _ = condition.shape 288 289 scale = height // target_height 290 291 # scale 
appropriately 292 condition = F.interpolate(condition, size=target.shape[-2:], mode="bilinear") 293 294 # add and mask 295 result = (target + condition) / 2 296 result = mask_sequence_tensor(result, (condition_lengths / scale).ceil().long()) 297 298 return result 299 300 @property 301 def upsample_factor(self): 302 return 2 ** sum(1 for block in self.blocks if block.upsample) 303 304 def forward(self, condition: torch.Tensor, lengths: torch.Tensor, ws: List[torch.Tensor], noise: torch.Tensor): 305 batch_size, _, _, max_length = condition.shape 306 307 x = self.initial_block.expand(batch_size, -1, -1, -1) 308 x = x[:, :, :, : max_length // self.upsample_factor] 309 310 rgb = None 311 x = self.initial_conv(x) 312 313 for style, block in zip(ws, self.blocks): 314 x, rgb = block(x, rgb, style, noise) 315 316 x = self.add_scaled_condition(x, condition, lengths) 317 rgb = self.add_scaled_condition(rgb, condition, lengths) 318 319 return rgb 320 321 322 class Discriminator(torch.nn.Module): 323 def __init__( 324 self, n_bands, network_capacity=16, channels=1, fmap_max=512, 325 ): 326 super().__init__() 327 num_layers = int(log2(n_bands) - 1) 328 num_init_filters = channels 329 330 blocks = [] 331 filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)] 332 333 set_fmap_max = partial(min, fmap_max) 334 filters = list(map(set_fmap_max, filters)) 335 chan_in_out = list(zip(filters[:-1], filters[1:])) 336 337 blocks = [] 338 339 for ind, (in_chan, out_chan) in enumerate(chan_in_out): 340 is_not_last = ind != (len(chan_in_out) - 1) 341 342 block = DiscriminatorBlock(in_chan, out_chan, downsample=is_not_last) 343 blocks.append(block) 344 345 self.blocks = torch.nn.ModuleList(blocks) 346 347 channel_last = filters[-1] 348 latent_dim = channel_last 349 350 self.final_conv = torch.nn.Conv2d(channel_last, channel_last, 3, padding=1) 351 self.to_logit = torch.nn.Linear(latent_dim, 1) 352 353 for m in self.modules(): 354 if isinstance(m, 
(torch.nn.Conv2d, torch.nn.Linear)): 355 torch.nn.init.kaiming_normal_(m.weight, a=0, mode="fan_in", nonlinearity="leaky_relu") 356 357 def forward(self, x, condition: torch.Tensor, lengths: torch.Tensor): 358 for block in self.blocks: 359 x = block(x) 360 scale = condition.shape[-1] // x.shape[-1] 361 x = mask_sequence_tensor(x, (lengths / scale).ceil().long()) 362 363 x = self.final_conv(x) 364 365 scale = condition.shape[-1] // x.shape[-1] 366 x = mask_sequence_tensor(x, (lengths / scale).ceil().long()) 367 368 x = x.mean(axis=-2) 369 x = (x / rearrange(lengths / scale, "b -> b 1 1")).sum(axis=-1) 370 x = self.to_logit(x) 371 return x.squeeze() 372 [end of nemo/collections/tts/modules/spectrogram_enhancer.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
64b74dc9eaa6a23e52b697c9f9b7ad87528a2373
Spectrogram Enhancer doesn't generalize to spectrogram lengths unseen during training **Describe the bug** If one trains TTS spectrogram enhancer (#5565) on short spectrograms, at inference time it doesn't generalize to longer ones. A patch in the beginning gets enhanced but further frames do not. Example (before, after): ![before](https://user-images.githubusercontent.com/8864149/218520151-230501b1-4e9d-4307-b51e-526440b915c8.png) ![after](https://user-images.githubusercontent.com/8864149/218520199-22187a4b-9cf8-4ac9-b3ce-07c320d3d3b6.png) **Steps/Code to reproduce bug** 1. Train a spectrogram enhancer 2. Apply it to a spectrogram that's longer than anything from the training set 3. Only a patch in the beginning gets enhanced **Expected behavior** The whole spectrogram should have got additional details, not just the first patch
A temporary fix: given a trained model, clone first patch of the initial tensor length-wise: ``` max_init_length = enhancer.generator.initial_block.shape[-1] m = 6 for i in range(1, max_init_length // m): enhancer.generator.initial_block.data[:,:,:,i*m:(i+1)*m] = enhancer.generator.initial_block.data[:,:,:,0:m] ``` Visually fix works, not sure what about downstream tasks ![after_temporary_fix](https://user-images.githubusercontent.com/8864149/218524157-9283395d-f196-41aa-81fe-f8868c2acb67.png) As-is most likely breaks #5659 (setup with enhancer)
2023-02-20T16:02:45Z
<patch> diff --git a/nemo/collections/tts/modules/spectrogram_enhancer.py b/nemo/collections/tts/modules/spectrogram_enhancer.py --- a/nemo/collections/tts/modules/spectrogram_enhancer.py +++ b/nemo/collections/tts/modules/spectrogram_enhancer.py @@ -230,14 +230,7 @@ def forward(self, x): class Generator(torch.nn.Module): def __init__( - self, - n_bands, - latent_dim, - style_depth, - network_capacity=16, - channels=1, - fmap_max=512, - max_spectrogram_length=2000, + self, n_bands, latent_dim, style_depth, network_capacity=16, channels=1, fmap_max=512, ): super().__init__() self.image_size = n_bands @@ -277,7 +270,7 @@ def __init__( torch.nn.init.zeros_(block.to_noise2.weight) torch.nn.init.zeros_(block.to_noise2.bias) - initial_block_size = n_bands // self.upsample_factor, math.ceil(max_spectrogram_length / self.upsample_factor) + initial_block_size = n_bands // self.upsample_factor, 1 self.initial_block = torch.nn.Parameter( torch.randn((1, init_channels, *initial_block_size)), requires_grad=False ) @@ -304,8 +297,7 @@ def upsample_factor(self): def forward(self, condition: torch.Tensor, lengths: torch.Tensor, ws: List[torch.Tensor], noise: torch.Tensor): batch_size, _, _, max_length = condition.shape - x = self.initial_block.expand(batch_size, -1, -1, -1) - x = x[:, :, :, : max_length // self.upsample_factor] + x = self.initial_block.expand(batch_size, -1, -1, max_length // self.upsample_factor) rgb = None x = self.initial_conv(x) </patch>
diff --git a/tests/collections/tts/test_spectrogram_enhancer.py b/tests/collections/tts/test_spectrogram_enhancer.py --- a/tests/collections/tts/test_spectrogram_enhancer.py +++ b/tests/collections/tts/test_spectrogram_enhancer.py @@ -37,7 +37,6 @@ def enhancer_config(): "network_capacity": network_capacity, "mixed_prob": 0.9, "fmap_max": fmap_max, - "max_spectrogram_length": 2000, "generator": { "_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Generator", "n_bands": n_bands, @@ -45,7 +44,6 @@ def enhancer_config(): "network_capacity": network_capacity, "style_depth": style_depth, "fmap_max": fmap_max, - "max_spectrogram_length": 2000, }, "discriminator": { "_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Discriminator",
1.0
celery__celery-567
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Introduce CELERYCTL variable in /etc/init.d/celeryd and /etc/default/celeryd I ran into a problem with '/etc/init.d/celeryd status' not finding celeryctl when using django-celery in a virtualenv. These changes replace the 'celeryctl' reference with a $CELERYCTL variable so /etc/default/celeryd can be updated with the celeryctl location similar to the way CELERYD_MULTI is handled. </issue> <code> [start of README] 1 ================================= 2 celery - Distributed Task Queue 3 ================================= 4 5 .. image:: http://cloud.github.com/downloads/ask/celery/celery_128.png 6 7 :Version: 2.4.5 8 :Web: http://celeryproject.org/ 9 :Download: http://pypi.python.org/pypi/celery/ 10 :Source: http://github.com/ask/celery/ 11 :Keywords: task queue, job queue, asynchronous, rabbitmq, amqp, redis, 12 python, webhooks, queue, distributed 13 14 -- 15 16 .. _celery-synopsis: 17 18 Celery is an open source asynchronous task queue/job queue based on 19 distributed message passing. It is focused on real-time operation, 20 but supports scheduling as well. 21 22 The execution units, called tasks, are executed concurrently on one or 23 more worker nodes using multiprocessing, `Eventlet`_ or `gevent`_. Tasks can 24 execute asynchronously (in the background) or synchronously 25 (wait until ready). 26 27 Celery is used in production systems to process millions of tasks a day. 28 29 Celery is written in Python, but the protocol can be implemented in any 30 language. It can also `operate with other languages using webhooks`_. 31 32 The recommended message broker is `RabbitMQ`_, but `limited support`_ for 33 `Redis`_, `Beanstalk`_, `MongoDB`_, `CouchDB`_ and 34 databases (using `SQLAlchemy`_ or the `Django ORM`_) is also available. 
35 36 37 Celery is easy to integrate with `Django`_, `Pylons`_ and `Flask`_, using 38 the `django-celery`_, `celery-pylons`_ and `Flask-Celery`_ add-on packages. 39 40 .. _`RabbitMQ`: http://www.rabbitmq.com/ 41 .. _`Redis`: http://code.google.com/p/redis/ 42 .. _`SQLAlchemy`: http://www.sqlalchemy.org/ 43 .. _`Django`: http://djangoproject.com/ 44 .. _`Django ORM`: http://djangoproject.com/ 45 .. _`Eventlet`: http://eventlet.net/ 46 .. _`gevent`: http://gevent.org/ 47 .. _`Beanstalk`: http://kr.github.com/beanstalkd/ 48 .. _`MongoDB`: http://mongodb.org/ 49 .. _`CouchDB`: http://couchdb.apache.org/ 50 .. _`Pylons`: http://pylonshq.com/ 51 .. _`Flask`: http://flask.pocoo.org/ 52 .. _`django-celery`: http://pypi.python.org/pypi/django-celery 53 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons 54 .. _`Flask-Celery`: http://github.com/ask/flask-celery/ 55 .. _`operate with other languages using webhooks`: 56 http://ask.github.com/celery/userguide/remote-tasks.html 57 .. _`limited support`: 58 http://kombu.readthedocs.org/en/latest/introduction.html#transport-comparison 59 60 .. contents:: 61 :local: 62 63 .. _celery-overview: 64 65 Overview 66 ======== 67 68 This is a high level overview of the architecture. 69 70 .. image:: http://cloud.github.com/downloads/ask/celery/Celery-Overview-v4.jpg 71 72 The broker delivers tasks to the worker nodes. 73 A worker node is a networked machine running `celeryd`. This can be one or 74 more machines depending on the workload. 75 76 The result of the task can be stored for later retrieval (called its 77 "tombstone"). 78 79 .. 
_celery-example: 80 81 Example 82 ======= 83 84 You probably want to see some code by now, so here's an example task 85 adding two numbers: 86 :: 87 88 from celery.task import task 89 90 @task 91 def add(x, y): 92 return x + y 93 94 You can execute the task in the background, or wait for it to finish:: 95 96 >>> result = add.delay(4, 4) 97 >>> result.wait() # wait for and return the result 98 8 99 100 Simple! 101 102 .. _celery-features: 103 104 Features 105 ======== 106 107 +-----------------+----------------------------------------------------+ 108 | Messaging | Supported brokers include `RabbitMQ`_, `Redis`_, | 109 | | `Beanstalk`_, `MongoDB`_, `CouchDB`_, and popular | 110 | | SQL databases. | 111 +-----------------+----------------------------------------------------+ 112 | Fault-tolerant | Excellent configurable error recovery when using | 113 | | `RabbitMQ`, ensures your tasks are never lost. | 114 +-----------------+----------------------------------------------------+ 115 | Distributed | Runs on one or more machines. Supports | 116 | | broker `clustering`_ and `HA`_ when used in | 117 | | combination with `RabbitMQ`_. You can set up new | 118 | | workers without central configuration (e.g. use | 119 | | your grandma's laptop to help if the queue is | 120 | | temporarily congested). | 121 +-----------------+----------------------------------------------------+ 122 | Concurrency | Concurrency is achieved by using multiprocessing, | 123 | | `Eventlet`_, `gevent` or a mix of these. | 124 +-----------------+----------------------------------------------------+ 125 | Scheduling | Supports recurring tasks like cron, or specifying | 126 | | an exact date or countdown for when after the task | 127 | | should be executed. | 128 +-----------------+----------------------------------------------------+ 129 | Latency | Low latency means you are able to execute tasks | 130 | | *while the user is waiting*. 
| 131 +-----------------+----------------------------------------------------+ 132 | Return Values | Task return values can be saved to the selected | 133 | | result store backend. You can wait for the result, | 134 | | retrieve it later, or ignore it. | 135 +-----------------+----------------------------------------------------+ 136 | Result Stores | Database, `MongoDB`_, `Redis`_, `Tokyo Tyrant`, | 137 | | `Cassandra`, or `AMQP`_ (message notification). | 138 +-----------------+----------------------------------------------------+ 139 | Webhooks | Your tasks can also be HTTP callbacks, enabling | 140 | | cross-language communication. | 141 +-----------------+----------------------------------------------------+ 142 | Rate limiting | Supports rate limiting by using the token bucket | 143 | | algorithm, which accounts for bursts of traffic. | 144 | | Rate limits can be set for each task type, or | 145 | | globally for all. | 146 +-----------------+----------------------------------------------------+ 147 | Routing | Using AMQP's flexible routing model you can route | 148 | | tasks to different workers, or select different | 149 | | message topologies, by configuration or even at | 150 | | runtime. | 151 +-----------------+----------------------------------------------------+ 152 | Remote-control | Worker nodes can be controlled from remote by | 153 | | using broadcast messaging. A range of built-in | 154 | | commands exist in addition to the ability to | 155 | | easily define your own. (AMQP/Redis only) | 156 +-----------------+----------------------------------------------------+ 157 | Monitoring | You can capture everything happening with the | 158 | | workers in real-time by subscribing to events. | 159 | | A real-time web monitor is in development. | 160 +-----------------+----------------------------------------------------+ 161 | Serialization | Supports Pickle, JSON, YAML, or easily defined | 162 | | custom schemes. 
One task invocation can have a | 163 | | different scheme than another. | 164 +-----------------+----------------------------------------------------+ 165 | Tracebacks | Errors and tracebacks are stored and can be | 166 | | investigated after the fact. | 167 +-----------------+----------------------------------------------------+ 168 | UUID | Every task has an UUID (Universally Unique | 169 | | Identifier), which is the task id used to query | 170 | | task status and return value. | 171 +-----------------+----------------------------------------------------+ 172 | Retries | Tasks can be retried if they fail, with | 173 | | configurable maximum number of retries, and delays | 174 | | between each retry. | 175 +-----------------+----------------------------------------------------+ 176 | Task Sets | A Task set is a task consisting of several | 177 | | sub-tasks. You can find out how many, or if all | 178 | | of the sub-tasks has been executed, and even | 179 | | retrieve the results in order. Progress bars, | 180 | | anyone? | 181 +-----------------+----------------------------------------------------+ 182 | Made for Web | You can query status and results via URLs, | 183 | | enabling the ability to poll task status using | 184 | | Ajax. | 185 +-----------------+----------------------------------------------------+ 186 | Error Emails | Can be configured to send emails to the | 187 | | administrators when tasks fails. | 188 +-----------------+----------------------------------------------------+ 189 190 191 .. _`clustering`: http://www.rabbitmq.com/clustering.html 192 .. _`HA`: http://www.rabbitmq.com/pacemaker.html 193 .. _`AMQP`: http://www.amqp.org/ 194 .. _`Stomp`: http://stomp.codehaus.org/ 195 .. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/ 196 197 .. _celery-documentation: 198 199 Documentation 200 ============= 201 202 The `latest documentation`_ with user guides, tutorials and API reference 203 is hosted at Github. 204 205 .. 
_`latest documentation`: http://ask.github.com/celery/ 206 207 .. _celery-installation: 208 209 Installation 210 ============ 211 212 You can install Celery either via the Python Package Index (PyPI) 213 or from source. 214 215 To install using `pip`,:: 216 217 $ pip install -U Celery 218 219 To install using `easy_install`,:: 220 221 $ easy_install -U Celery 222 223 Bundles 224 ------- 225 226 Celery also defines a group of bundles that can be used 227 to install Celery and the dependencies for a given feature. 228 229 The following bundles are available: 230 231 :`celery-with-redis`_: 232 for using Redis as a broker. 233 234 :`celery-with-mongodb`_: 235 for using MongoDB as a broker. 236 237 :`django-celery-with-redis`_: 238 for Django, and using Redis as a broker. 239 240 :`django-celery-with-mongodb`_: 241 for Django, and using MongoDB as a broker. 242 243 :`bundle-celery`_: 244 convenience bundle installing *Celery* and related packages. 245 246 .. _`celery-with-redis`: 247 http://pypi.python.org/pypi/celery-with-redis/ 248 .. _`celery-with-mongodb`: 249 http://pypi.python.org/pypi/celery-with-mongdb/ 250 .. _`django-celery-with-redis`: 251 http://pypi.python.org/pypi/django-celery-with-redis/ 252 .. _`django-celery-with-mongodb`: 253 http://pypi.python.org/pypi/django-celery-with-mongdb/ 254 .. _`bundle-celery`: 255 http://pypi.python.org/pypi/bundle-celery/ 256 257 .. _celery-installing-from-source: 258 259 Downloading and installing from source 260 -------------------------------------- 261 262 Download the latest version of Celery from 263 http://pypi.python.org/pypi/celery/ 264 265 You can install it by doing the following,:: 266 267 $ tar xvfz celery-0.0.0.tar.gz 268 $ cd celery-0.0.0 269 $ python setup.py build 270 # python setup.py install # as root 271 272 .. 
_celery-installing-from-git: 273 274 Using the development version 275 ----------------------------- 276 277 You can clone the repository by doing the following:: 278 279 $ git clone git://github.com/ask/celery.git 280 281 .. _getting-help: 282 283 Getting Help 284 ============ 285 286 .. _mailing-list: 287 288 Mailing list 289 ------------ 290 291 For discussions about the usage, development, and future of celery, 292 please join the `celery-users`_ mailing list. 293 294 .. _`celery-users`: http://groups.google.com/group/celery-users/ 295 296 .. _irc-channel: 297 298 IRC 299 --- 300 301 Come chat with us on IRC. The `#celery`_ channel is located at the `Freenode`_ 302 network. 303 304 .. _`#celery`: irc://irc.freenode.net/celery 305 .. _`Freenode`: http://freenode.net 306 307 .. _bug-tracker: 308 309 Bug tracker 310 =========== 311 312 If you have any suggestions, bug reports or annoyances please report them 313 to our issue tracker at http://github.com/ask/celery/issues/ 314 315 .. _wiki: 316 317 Wiki 318 ==== 319 320 http://wiki.github.com/ask/celery/ 321 322 .. _contributing-short: 323 324 Contributing 325 ============ 326 327 Development of `celery` happens at Github: http://github.com/ask/celery 328 329 You are highly encouraged to participate in the development 330 of `celery`. If you don't like Github (for some reason) you're welcome 331 to send regular patches. 332 333 Be sure to also read the `Contributing to Celery`_ section in the 334 documentation. 335 336 .. _`Contributing to Celery`: http://ask.github.com/celery/contributing.html 337 338 .. _license: 339 340 License 341 ======= 342 343 This software is licensed under the `New BSD License`. See the ``LICENSE`` 344 file in the top distribution directory for the full license text. 345 346 .. 
# vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround 347 348 [end of README] [start of README.rst] 1 ================================= 2 celery - Distributed Task Queue 3 ================================= 4 5 .. image:: http://cloud.github.com/downloads/ask/celery/celery_128.png 6 7 :Version: 2.4.5 8 :Web: http://celeryproject.org/ 9 :Download: http://pypi.python.org/pypi/celery/ 10 :Source: http://github.com/ask/celery/ 11 :Keywords: task queue, job queue, asynchronous, rabbitmq, amqp, redis, 12 python, webhooks, queue, distributed 13 14 -- 15 16 .. _celery-synopsis: 17 18 Celery is an open source asynchronous task queue/job queue based on 19 distributed message passing. It is focused on real-time operation, 20 but supports scheduling as well. 21 22 The execution units, called tasks, are executed concurrently on one or 23 more worker nodes using multiprocessing, `Eventlet`_ or `gevent`_. Tasks can 24 execute asynchronously (in the background) or synchronously 25 (wait until ready). 26 27 Celery is used in production systems to process millions of tasks a day. 28 29 Celery is written in Python, but the protocol can be implemented in any 30 language. It can also `operate with other languages using webhooks`_. 31 32 The recommended message broker is `RabbitMQ`_, but `limited support`_ for 33 `Redis`_, `Beanstalk`_, `MongoDB`_, `CouchDB`_ and 34 databases (using `SQLAlchemy`_ or the `Django ORM`_) is also available. 35 36 37 Celery is easy to integrate with `Django`_, `Pylons`_ and `Flask`_, using 38 the `django-celery`_, `celery-pylons`_ and `Flask-Celery`_ add-on packages. 39 40 .. _`RabbitMQ`: http://www.rabbitmq.com/ 41 .. _`Redis`: http://code.google.com/p/redis/ 42 .. _`SQLAlchemy`: http://www.sqlalchemy.org/ 43 .. _`Django`: http://djangoproject.com/ 44 .. _`Django ORM`: http://djangoproject.com/ 45 .. _`Eventlet`: http://eventlet.net/ 46 .. _`gevent`: http://gevent.org/ 47 .. _`Beanstalk`: http://kr.github.com/beanstalkd/ 48 .. 
_`MongoDB`: http://mongodb.org/ 49 .. _`CouchDB`: http://couchdb.apache.org/ 50 .. _`Pylons`: http://pylonshq.com/ 51 .. _`Flask`: http://flask.pocoo.org/ 52 .. _`django-celery`: http://pypi.python.org/pypi/django-celery 53 .. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons 54 .. _`Flask-Celery`: http://github.com/ask/flask-celery/ 55 .. _`operate with other languages using webhooks`: 56 http://ask.github.com/celery/userguide/remote-tasks.html 57 .. _`limited support`: 58 http://kombu.readthedocs.org/en/latest/introduction.html#transport-comparison 59 60 .. contents:: 61 :local: 62 63 .. _celery-overview: 64 65 Overview 66 ======== 67 68 This is a high level overview of the architecture. 69 70 .. image:: http://cloud.github.com/downloads/ask/celery/Celery-Overview-v4.jpg 71 72 The broker delivers tasks to the worker nodes. 73 A worker node is a networked machine running `celeryd`. This can be one or 74 more machines depending on the workload. 75 76 The result of the task can be stored for later retrieval (called its 77 "tombstone"). 78 79 .. _celery-example: 80 81 Example 82 ======= 83 84 You probably want to see some code by now, so here's an example task 85 adding two numbers: 86 :: 87 88 from celery.task import task 89 90 @task 91 def add(x, y): 92 return x + y 93 94 You can execute the task in the background, or wait for it to finish:: 95 96 >>> result = add.delay(4, 4) 97 >>> result.wait() # wait for and return the result 98 8 99 100 Simple! 101 102 .. _celery-features: 103 104 Features 105 ======== 106 107 +-----------------+----------------------------------------------------+ 108 | Messaging | Supported brokers include `RabbitMQ`_, `Redis`_, | 109 | | `Beanstalk`_, `MongoDB`_, `CouchDB`_, and popular | 110 | | SQL databases. | 111 +-----------------+----------------------------------------------------+ 112 | Fault-tolerant | Excellent configurable error recovery when using | 113 | | `RabbitMQ`, ensures your tasks are never lost. 
| 114 +-----------------+----------------------------------------------------+ 115 | Distributed | Runs on one or more machines. Supports | 116 | | broker `clustering`_ and `HA`_ when used in | 117 | | combination with `RabbitMQ`_. You can set up new | 118 | | workers without central configuration (e.g. use | 119 | | your grandma's laptop to help if the queue is | 120 | | temporarily congested). | 121 +-----------------+----------------------------------------------------+ 122 | Concurrency | Concurrency is achieved by using multiprocessing, | 123 | | `Eventlet`_, `gevent` or a mix of these. | 124 +-----------------+----------------------------------------------------+ 125 | Scheduling | Supports recurring tasks like cron, or specifying | 126 | | an exact date or countdown for when after the task | 127 | | should be executed. | 128 +-----------------+----------------------------------------------------+ 129 | Latency | Low latency means you are able to execute tasks | 130 | | *while the user is waiting*. | 131 +-----------------+----------------------------------------------------+ 132 | Return Values | Task return values can be saved to the selected | 133 | | result store backend. You can wait for the result, | 134 | | retrieve it later, or ignore it. | 135 +-----------------+----------------------------------------------------+ 136 | Result Stores | Database, `MongoDB`_, `Redis`_, `Tokyo Tyrant`, | 137 | | `Cassandra`, or `AMQP`_ (message notification). | 138 +-----------------+----------------------------------------------------+ 139 | Webhooks | Your tasks can also be HTTP callbacks, enabling | 140 | | cross-language communication. | 141 +-----------------+----------------------------------------------------+ 142 | Rate limiting | Supports rate limiting by using the token bucket | 143 | | algorithm, which accounts for bursts of traffic. | 144 | | Rate limits can be set for each task type, or | 145 | | globally for all. 
| 146 +-----------------+----------------------------------------------------+ 147 | Routing | Using AMQP's flexible routing model you can route | 148 | | tasks to different workers, or select different | 149 | | message topologies, by configuration or even at | 150 | | runtime. | 151 +-----------------+----------------------------------------------------+ 152 | Remote-control | Worker nodes can be controlled from remote by | 153 | | using broadcast messaging. A range of built-in | 154 | | commands exist in addition to the ability to | 155 | | easily define your own. (AMQP/Redis only) | 156 +-----------------+----------------------------------------------------+ 157 | Monitoring | You can capture everything happening with the | 158 | | workers in real-time by subscribing to events. | 159 | | A real-time web monitor is in development. | 160 +-----------------+----------------------------------------------------+ 161 | Serialization | Supports Pickle, JSON, YAML, or easily defined | 162 | | custom schemes. One task invocation can have a | 163 | | different scheme than another. | 164 +-----------------+----------------------------------------------------+ 165 | Tracebacks | Errors and tracebacks are stored and can be | 166 | | investigated after the fact. | 167 +-----------------+----------------------------------------------------+ 168 | UUID | Every task has an UUID (Universally Unique | 169 | | Identifier), which is the task id used to query | 170 | | task status and return value. | 171 +-----------------+----------------------------------------------------+ 172 | Retries | Tasks can be retried if they fail, with | 173 | | configurable maximum number of retries, and delays | 174 | | between each retry. | 175 +-----------------+----------------------------------------------------+ 176 | Task Sets | A Task set is a task consisting of several | 177 | | sub-tasks. 
You can find out how many, or if all | 178 | | of the sub-tasks has been executed, and even | 179 | | retrieve the results in order. Progress bars, | 180 | | anyone? | 181 +-----------------+----------------------------------------------------+ 182 | Made for Web | You can query status and results via URLs, | 183 | | enabling the ability to poll task status using | 184 | | Ajax. | 185 +-----------------+----------------------------------------------------+ 186 | Error Emails | Can be configured to send emails to the | 187 | | administrators when tasks fails. | 188 +-----------------+----------------------------------------------------+ 189 190 191 .. _`clustering`: http://www.rabbitmq.com/clustering.html 192 .. _`HA`: http://www.rabbitmq.com/pacemaker.html 193 .. _`AMQP`: http://www.amqp.org/ 194 .. _`Stomp`: http://stomp.codehaus.org/ 195 .. _`Tokyo Tyrant`: http://tokyocabinet.sourceforge.net/ 196 197 .. _celery-documentation: 198 199 Documentation 200 ============= 201 202 The `latest documentation`_ with user guides, tutorials and API reference 203 is hosted at Github. 204 205 .. _`latest documentation`: http://ask.github.com/celery/ 206 207 .. _celery-installation: 208 209 Installation 210 ============ 211 212 You can install Celery either via the Python Package Index (PyPI) 213 or from source. 214 215 To install using `pip`,:: 216 217 $ pip install -U Celery 218 219 To install using `easy_install`,:: 220 221 $ easy_install -U Celery 222 223 Bundles 224 ------- 225 226 Celery also defines a group of bundles that can be used 227 to install Celery and the dependencies for a given feature. 228 229 The following bundles are available: 230 231 :`celery-with-redis`_: 232 for using Redis as a broker. 233 234 :`celery-with-mongodb`_: 235 for using MongoDB as a broker. 236 237 :`django-celery-with-redis`_: 238 for Django, and using Redis as a broker. 239 240 :`django-celery-with-mongodb`_: 241 for Django, and using MongoDB as a broker. 
242 243 :`bundle-celery`_: 244 convenience bundle installing *Celery* and related packages. 245 246 .. _`celery-with-redis`: 247 http://pypi.python.org/pypi/celery-with-redis/ 248 .. _`celery-with-mongodb`: 249 http://pypi.python.org/pypi/celery-with-mongdb/ 250 .. _`django-celery-with-redis`: 251 http://pypi.python.org/pypi/django-celery-with-redis/ 252 .. _`django-celery-with-mongodb`: 253 http://pypi.python.org/pypi/django-celery-with-mongdb/ 254 .. _`bundle-celery`: 255 http://pypi.python.org/pypi/bundle-celery/ 256 257 .. _celery-installing-from-source: 258 259 Downloading and installing from source 260 -------------------------------------- 261 262 Download the latest version of Celery from 263 http://pypi.python.org/pypi/celery/ 264 265 You can install it by doing the following,:: 266 267 $ tar xvfz celery-0.0.0.tar.gz 268 $ cd celery-0.0.0 269 $ python setup.py build 270 # python setup.py install # as root 271 272 .. _celery-installing-from-git: 273 274 Using the development version 275 ----------------------------- 276 277 You can clone the repository by doing the following:: 278 279 $ git clone git://github.com/ask/celery.git 280 281 .. _getting-help: 282 283 Getting Help 284 ============ 285 286 .. _mailing-list: 287 288 Mailing list 289 ------------ 290 291 For discussions about the usage, development, and future of celery, 292 please join the `celery-users`_ mailing list. 293 294 .. _`celery-users`: http://groups.google.com/group/celery-users/ 295 296 .. _irc-channel: 297 298 IRC 299 --- 300 301 Come chat with us on IRC. The `#celery`_ channel is located at the `Freenode`_ 302 network. 303 304 .. _`#celery`: irc://irc.freenode.net/celery 305 .. _`Freenode`: http://freenode.net 306 307 .. _bug-tracker: 308 309 Bug tracker 310 =========== 311 312 If you have any suggestions, bug reports or annoyances please report them 313 to our issue tracker at http://github.com/ask/celery/issues/ 314 315 .. 
_wiki: 316 317 Wiki 318 ==== 319 320 http://wiki.github.com/ask/celery/ 321 322 .. _contributing-short: 323 324 Contributing 325 ============ 326 327 Development of `celery` happens at Github: http://github.com/ask/celery 328 329 You are highly encouraged to participate in the development 330 of `celery`. If you don't like Github (for some reason) you're welcome 331 to send regular patches. 332 333 Be sure to also read the `Contributing to Celery`_ section in the 334 documentation. 335 336 .. _`Contributing to Celery`: http://ask.github.com/celery/contributing.html 337 338 .. _license: 339 340 License 341 ======= 342 343 This software is licensed under the `New BSD License`. See the ``LICENSE`` 344 file in the top distribution directory for the full license text. 345 346 .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround 347 348 [end of README.rst] [start of /dev/null] 1 [end of /dev/null] [start of celery/__init__.py] 1 # -*- coding: utf-8 -*- 2 """Distributed Task Queue""" 3 # :copyright: (c) 2009 - 2011 by Ask Solem. 4 # :license: BSD, see LICENSE for more details. 5 6 from __future__ import absolute_import 7 8 import os 9 import sys 10 11 VERSION = (2, 4, 5) 12 13 __version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:]) 14 __author__ = "Ask Solem" 15 __contact__ = "ask@celeryproject.org" 16 __homepage__ = "http://celeryproject.org" 17 __docformat__ = "restructuredtext" 18 19 if sys.version_info < (2, 5): 20 raise Exception( 21 "Python 2.4 is not supported by this version. 
" 22 "Please use Celery versions 2.1.x or earlier.") 23 24 25 def Celery(*args, **kwargs): 26 from .app import App 27 return App(*args, **kwargs) 28 29 if not os.environ.get("CELERY_NO_EVAL", False): 30 from .local import Proxy 31 32 def _get_current_app(): 33 from .app import current_app 34 return current_app() 35 36 current_app = Proxy(_get_current_app) 37 [end of celery/__init__.py] [start of celery/app/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.app 4 ~~~~~~~~~~ 5 6 Celery Application. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 13 from __future__ import absolute_import 14 15 import os 16 import threading 17 18 from functools import wraps 19 from inspect import getargspec 20 21 from .. import registry 22 from ..utils import cached_property, instantiate 23 24 from . import base 25 26 # Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute 27 # sets this, so it will always contain the last instantiated app, 28 # and is the default app returned by :func:`app_or_default`. 
29 _tls = threading.local() 30 _tls.current_app = None 31 32 33 class AppPickler(object): 34 35 def __call__(self, cls, *args): 36 kwargs = self.build_kwargs(*args) 37 app = self.construct(cls, **kwargs) 38 self.prepare(app, **kwargs) 39 return app 40 41 def prepare(self, app, **kwargs): 42 app.conf.update(kwargs["changes"]) 43 44 def build_kwargs(self, *args): 45 return self.build_standard_kwargs(*args) 46 47 def build_standard_kwargs(self, main, changes, loader, backend, amqp, 48 events, log, control, accept_magic_kwargs): 49 return dict(main=main, loader=loader, backend=backend, amqp=amqp, 50 changes=changes, events=events, log=log, control=control, 51 set_as_current=False, 52 accept_magic_kwargs=accept_magic_kwargs) 53 54 def construct(self, cls, **kwargs): 55 return cls(**kwargs) 56 57 58 def _unpickle_app(cls, pickler, *args): 59 return pickler()(cls, *args) 60 61 62 class App(base.BaseApp): 63 """Celery Application. 64 65 :param main: Name of the main module if running as `__main__`. 66 :keyword loader: The loader class, or the name of the loader class to use. 67 Default is :class:`celery.loaders.app.AppLoader`. 68 :keyword backend: The result store backend class, or the name of the 69 backend class to use. Default is the value of the 70 :setting:`CELERY_RESULT_BACKEND` setting. 71 :keyword amqp: AMQP object or class name. 72 :keyword events: Events object or class name. 73 :keyword log: Log object or class name. 74 :keyword control: Control object or class name. 75 :keyword set_as_current: Make this the global current app. 
76 77 """ 78 Pickler = AppPickler 79 80 def set_current(self): 81 """Make this the current app for this thread.""" 82 _tls.current_app = self 83 84 def on_init(self): 85 if self.set_as_current: 86 self.set_current() 87 88 def create_task_cls(self): 89 """Creates a base task class using default configuration 90 taken from this app.""" 91 conf = self.conf 92 93 from .task import BaseTask 94 95 class Task(BaseTask): 96 abstract = True 97 app = self 98 backend = self.backend 99 exchange_type = conf.CELERY_DEFAULT_EXCHANGE_TYPE 100 delivery_mode = conf.CELERY_DEFAULT_DELIVERY_MODE 101 send_error_emails = conf.CELERY_SEND_TASK_ERROR_EMAILS 102 error_whitelist = conf.CELERY_TASK_ERROR_WHITELIST 103 serializer = conf.CELERY_TASK_SERIALIZER 104 rate_limit = conf.CELERY_DEFAULT_RATE_LIMIT 105 track_started = conf.CELERY_TRACK_STARTED 106 acks_late = conf.CELERY_ACKS_LATE 107 ignore_result = conf.CELERY_IGNORE_RESULT 108 store_errors_even_if_ignored = \ 109 conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED 110 accept_magic_kwargs = self.accept_magic_kwargs 111 Task.__doc__ = BaseTask.__doc__ 112 113 return Task 114 115 def Worker(self, **kwargs): 116 """Create new :class:`~celery.apps.worker.Worker` instance.""" 117 return instantiate("celery.apps.worker.Worker", app=self, **kwargs) 118 119 def Beat(self, **kwargs): 120 """Create new :class:`~celery.apps.beat.Beat` instance.""" 121 return instantiate("celery.apps.beat.Beat", app=self, **kwargs) 122 123 def TaskSet(self, *args, **kwargs): 124 """Create new :class:`~celery.task.sets.TaskSet`.""" 125 from ..task.sets import TaskSet 126 kwargs["app"] = self 127 return TaskSet(*args, **kwargs) 128 129 def worker_main(self, argv=None): 130 """Run :program:`celeryd` using `argv`. 
Uses :data:`sys.argv` 131 if `argv` is not specified.""" 132 from ..bin.celeryd import WorkerCommand 133 return WorkerCommand(app=self).execute_from_commandline(argv) 134 135 def task(self, *args, **options): 136 """Decorator to create a task class out of any callable. 137 138 .. admonition:: Examples 139 140 .. code-block:: python 141 142 @task() 143 def refresh_feed(url): 144 return Feed.objects.get(url=url).refresh() 145 146 With setting extra options and using retry. 147 148 .. code-block:: python 149 150 @task(exchange="feeds") 151 def refresh_feed(url, **kwargs): 152 try: 153 return Feed.objects.get(url=url).refresh() 154 except socket.error, exc: 155 refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc) 156 157 Calling the resulting task: 158 159 >>> refresh_feed("http://example.com/rss") # Regular 160 <Feed: http://example.com/rss> 161 >>> refresh_feed.delay("http://example.com/rss") # Async 162 <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d> 163 164 """ 165 166 def inner_create_task_cls(**options): 167 168 def _create_task_cls(fun): 169 options["app"] = self 170 options.setdefault("accept_magic_kwargs", False) 171 base = options.pop("base", None) or self.Task 172 173 @wraps(fun, assigned=("__module__", "__name__")) 174 def run(self, *args, **kwargs): 175 return fun(*args, **kwargs) 176 177 # Save the argspec for this task so we can recognize 178 # which default task kwargs we're going to pass to it later. 179 # (this happens in celery.utils.fun_takes_kwargs) 180 run.argspec = getargspec(fun) 181 182 cls_dict = dict(options, run=run, 183 __module__=fun.__module__, 184 __doc__=fun.__doc__) 185 T = type(fun.__name__, (base, ), cls_dict)() 186 return registry.tasks[T.name] # global instance. 
187 188 return _create_task_cls 189 190 if len(args) == 1 and callable(args[0]): 191 return inner_create_task_cls(**options)(*args) 192 return inner_create_task_cls(**options) 193 194 @cached_property 195 def Task(self): 196 """Default Task base class for this application.""" 197 return self.create_task_cls() 198 199 def __repr__(self): 200 return "<Celery: %s:0x%x>" % (self.main or "__main__", id(self), ) 201 202 def __reduce__(self): 203 # Reduce only pickles the configuration changes, 204 # so the default configuration doesn't have to be passed 205 # between processes. 206 return (_unpickle_app, (self.__class__, self.Pickler) 207 + self.__reduce_args__()) 208 209 def __reduce_args__(self): 210 return (self.main, 211 self.conf.changes, 212 self.loader_cls, 213 self.backend_cls, 214 self.amqp_cls, 215 self.events_cls, 216 self.log_cls, 217 self.control_cls, 218 self.accept_magic_kwargs) 219 220 221 #: The "default" loader is the default loader used by old applications. 222 default_loader = os.environ.get("CELERY_LOADER") or "default" 223 224 #: Global fallback app instance. 225 default_app = App("default", loader=default_loader, 226 set_as_current=False, accept_magic_kwargs=True) 227 228 229 def current_app(): 230 return getattr(_tls, "current_app", None) or default_app 231 232 233 def _app_or_default(app=None): 234 """Returns the app provided or the default app if none. 235 236 The environment variable :envvar:`CELERY_TRACE_APP` is used to 237 trace app leaks. When enabled an exception is raised if there 238 is no active app. 
239 240 """ 241 if app is None: 242 return getattr(_tls, "current_app", None) or default_app 243 return app 244 245 246 def _app_or_default_trace(app=None): # pragma: no cover 247 from traceback import print_stack 248 from multiprocessing import current_process 249 if app is None: 250 if getattr(_tls, "current_app", None): 251 print("-- RETURNING TO CURRENT APP --") # noqa+ 252 print_stack() 253 return _tls.current_app 254 if current_process()._name == "MainProcess": 255 raise Exception("DEFAULT APP") 256 print("-- RETURNING TO DEFAULT APP --") # noqa+ 257 print_stack() 258 return default_app 259 return app 260 261 262 def enable_trace(): 263 global app_or_default 264 app_or_default = _app_or_default_trace 265 266 267 def disable_trace(): 268 global app_or_default 269 app_or_default = _app_or_default 270 271 272 app_or_default = _app_or_default 273 if os.environ.get("CELERY_TRACE_APP"): # pragma: no cover 274 enable_trace() 275 [end of celery/app/__init__.py] [start of celery/app/amqp.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.app.amqp 4 ~~~~~~~~~~~~~~~ 5 6 AMQ related functionality. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 from datetime import datetime, timedelta 15 16 from kombu import BrokerConnection, Exchange 17 from kombu import compat as messaging 18 from kombu.pools import ProducerPool 19 20 from .. import routes as _routes 21 from .. import signals 22 from ..utils import cached_property, textindent, uuid 23 24 #: List of known options to a Kombu producers send method. 25 #: Used to extract the message related options out of any `dict`. 26 MSG_OPTIONS = ("mandatory", "priority", "immediate", "routing_key", 27 "serializer", "delivery_mode", "compression") 28 29 #: Human readable queue declaration. 30 QUEUE_FORMAT = """ 31 . 
%(name)s exchange:%(exchange)s (%(exchange_type)s) \ 32 binding:%(binding_key)s 33 """ 34 35 #: Set of exchange names that have already been declared. 36 _exchanges_declared = set() 37 38 #: Set of queue names that have already been declared. 39 _queues_declared = set() 40 41 42 def extract_msg_options(options, keep=MSG_OPTIONS): 43 """Extracts known options to `basic_publish` from a dict, 44 and returns a new dict.""" 45 return dict((name, options.get(name)) for name in keep) 46 47 48 class Queues(dict): 49 """Queue nameβ‡’ declaration mapping. 50 51 Celery will consult this mapping to find the options 52 for any queue by name. 53 54 :param queues: Initial mapping. 55 56 """ 57 #: If set, this is a subset of queues to consume from. 58 #: The rest of the queues are then used for routing only. 59 _consume_from = None 60 61 def __init__(self, queues): 62 dict.__init__(self) 63 for queue_name, options in (queues or {}).items(): 64 self.add(queue_name, **options) 65 66 def add(self, queue, exchange=None, routing_key=None, 67 exchange_type="direct", **options): 68 """Add new queue. 69 70 :param queue: Name of the queue. 71 :keyword exchange: Name of the exchange. 72 :keyword routing_key: Binding key. 73 :keyword exchange_type: Type of exchange. 74 :keyword \*\*options: Additional declaration options. 
75 76 """ 77 q = self[queue] = self.options(exchange, routing_key, 78 exchange_type, **options) 79 return q 80 81 def options(self, exchange, routing_key, 82 exchange_type="direct", **options): 83 """Creates new option mapping for queue, with required 84 keys present.""" 85 return dict(options, routing_key=routing_key, 86 binding_key=routing_key, 87 exchange=exchange, 88 exchange_type=exchange_type) 89 90 def format(self, indent=0, indent_first=True): 91 """Format routing table into string for log dumps.""" 92 active = self.consume_from 93 if not active: 94 return "" 95 info = [QUEUE_FORMAT.strip() % dict( 96 name=(name + ":").ljust(12), **config) 97 for name, config in sorted(active.iteritems())] 98 if indent_first: 99 return textindent("\n".join(info), indent) 100 return info[0] + "\n" + textindent("\n".join(info[1:]), indent) 101 102 def select_subset(self, wanted, create_missing=True): 103 """Select subset of the currently defined queues. 104 105 Does not return anything: queues not in `wanted` will 106 be discarded in-place. 107 108 :param wanted: List of wanted queue names. 109 :keyword create_missing: By default any unknown queues will be 110 added automatically, but if disabled 111 the occurrence of unknown queues 112 in `wanted` will raise :exc:`KeyError`. 
113 114 """ 115 acc = {} 116 for queue in wanted: 117 try: 118 options = self[queue] 119 except KeyError: 120 if not create_missing: 121 raise 122 options = self.options(queue, queue) 123 acc[queue] = options 124 self._consume_from = acc 125 self.update(acc) 126 127 @property 128 def consume_from(self): 129 if self._consume_from is not None: 130 return self._consume_from 131 return self 132 133 @classmethod 134 def with_defaults(cls, queues, default_exchange, default_exchange_type): 135 """Alternate constructor that adds default exchange and 136 exchange type information to queues that does not have any.""" 137 if queues is None: 138 queues = {} 139 for opts in queues.values(): 140 opts.setdefault("exchange", default_exchange), 141 opts.setdefault("exchange_type", default_exchange_type) 142 opts.setdefault("binding_key", default_exchange) 143 opts.setdefault("routing_key", opts.get("binding_key")) 144 return cls(queues) 145 146 147 class TaskPublisher(messaging.Publisher): 148 auto_declare = True 149 retry = False 150 retry_policy = None 151 152 def __init__(self, *args, **kwargs): 153 self.app = kwargs.pop("app") 154 self.retry = kwargs.pop("retry", self.retry) 155 self.retry_policy = kwargs.pop("retry_policy", 156 self.retry_policy or {}) 157 super(TaskPublisher, self).__init__(*args, **kwargs) 158 159 def declare(self): 160 if self.exchange.name and \ 161 self.exchange.name not in _exchanges_declared: 162 super(TaskPublisher, self).declare() 163 _exchanges_declared.add(self.exchange.name) 164 165 def _declare_queue(self, name, retry=False, retry_policy={}): 166 options = self.app.queues[name] 167 queue = messaging.entry_to_queue(name, **options)(self.channel) 168 if retry: 169 self.connection.ensure(queue, queue.declare, **retry_policy)() 170 else: 171 queue.declare() 172 return queue 173 174 def _declare_exchange(self, name, type, retry=False, retry_policy={}): 175 ex = Exchange(name, type=type, durable=self.durable, 176 
auto_delete=self.auto_delete)(self.channel) 177 if retry: 178 return self.connection.ensure(ex, ex.declare, **retry_policy) 179 return ex.declare() 180 181 def delay_task(self, task_name, task_args=None, task_kwargs=None, 182 countdown=None, eta=None, task_id=None, taskset_id=None, 183 expires=None, exchange=None, exchange_type=None, 184 event_dispatcher=None, retry=None, retry_policy=None, 185 queue=None, now=None, retries=0, chord=None, **kwargs): 186 """Send task message.""" 187 188 connection = self.connection 189 _retry_policy = self.retry_policy 190 if retry_policy: # merge default and custom policy 191 _retry_policy = dict(_retry_policy, **retry_policy) 192 193 # declare entities 194 if queue and queue not in _queues_declared: 195 entity = self._declare_queue(queue, retry, _retry_policy) 196 _exchanges_declared.add(entity.exchange.name) 197 _queues_declared.add(entity.name) 198 if exchange and exchange not in _exchanges_declared: 199 self._declare_exchange(exchange, 200 exchange_type or self.exchange_type, retry, _retry_policy) 201 _exchanges_declared.add(exchange) 202 203 task_id = task_id or uuid() 204 task_args = task_args or [] 205 task_kwargs = task_kwargs or {} 206 if not isinstance(task_args, (list, tuple)): 207 raise ValueError("task args must be a list or tuple") 208 if not isinstance(task_kwargs, dict): 209 raise ValueError("task kwargs must be a dictionary") 210 if countdown: # Convert countdown to ETA. 
211 now = now or datetime.now() 212 eta = now + timedelta(seconds=countdown) 213 if isinstance(expires, int): 214 now = now or datetime.now() 215 expires = now + timedelta(seconds=expires) 216 eta = eta and eta.isoformat() 217 expires = expires and expires.isoformat() 218 219 body = {"task": task_name, 220 "id": task_id, 221 "args": task_args or [], 222 "kwargs": task_kwargs or {}, 223 "retries": retries or 0, 224 "eta": eta, 225 "expires": expires} 226 227 if taskset_id: 228 body["taskset"] = taskset_id 229 if chord: 230 body["chord"] = chord 231 232 do_retry = retry if retry is not None else self.retry 233 send = self.send 234 if do_retry: 235 send = connection.ensure(self, self.send, **_retry_policy) 236 send(body, exchange=exchange, **extract_msg_options(kwargs)) 237 signals.task_sent.send(sender=task_name, **body) 238 if event_dispatcher: 239 event_dispatcher.send("task-sent", uuid=task_id, 240 name=task_name, 241 args=repr(task_args), 242 kwargs=repr(task_kwargs), 243 retries=retries, 244 eta=eta, 245 expires=expires) 246 return task_id 247 248 def __exit__(self, *exc_info): 249 try: 250 self.release() 251 except AttributeError: 252 self.close() 253 254 255 class PublisherPool(ProducerPool): 256 257 def __init__(self, app): 258 self.app = app 259 super(PublisherPool, self).__init__(self.app.pool, 260 limit=self.app.pool.limit) 261 262 def create_producer(self): 263 conn = self.connections.acquire(block=True) 264 pub = self.app.amqp.TaskPublisher(conn, auto_declare=False) 265 conn._producer_chan = pub.channel 266 return pub 267 268 269 class AMQP(object): 270 BrokerConnection = BrokerConnection 271 Publisher = messaging.Publisher 272 Consumer = messaging.Consumer 273 ConsumerSet = messaging.ConsumerSet 274 275 #: Cached and prepared routing table. 
276 _rtable = None 277 278 def __init__(self, app): 279 self.app = app 280 281 def flush_routes(self): 282 self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES) 283 284 def Queues(self, queues): 285 """Create new :class:`Queues` instance, using queue defaults 286 from the current configuration.""" 287 conf = self.app.conf 288 if not queues and conf.CELERY_DEFAULT_QUEUE: 289 queues = {conf.CELERY_DEFAULT_QUEUE: { 290 "exchange": conf.CELERY_DEFAULT_EXCHANGE, 291 "exchange_type": conf.CELERY_DEFAULT_EXCHANGE_TYPE, 292 "binding_key": conf.CELERY_DEFAULT_ROUTING_KEY}} 293 return Queues.with_defaults(queues, conf.CELERY_DEFAULT_EXCHANGE, 294 conf.CELERY_DEFAULT_EXCHANGE_TYPE) 295 296 def Router(self, queues=None, create_missing=None): 297 """Returns the current task router.""" 298 return _routes.Router(self.routes, queues or self.queues, 299 self.app.either("CELERY_CREATE_MISSING_QUEUES", 300 create_missing), app=self.app) 301 302 def TaskConsumer(self, *args, **kwargs): 303 """Returns consumer for a single task queue.""" 304 default_queue_name, default_queue = self.get_default_queue() 305 defaults = dict({"queue": default_queue_name}, **default_queue) 306 defaults["routing_key"] = defaults.pop("binding_key", None) 307 return self.Consumer(*args, 308 **self.app.merge(defaults, kwargs)) 309 310 def TaskPublisher(self, *args, **kwargs): 311 """Returns publisher used to send tasks. 312 313 You should use `app.send_task` instead. 
314 315 """ 316 conf = self.app.conf 317 _, default_queue = self.get_default_queue() 318 defaults = {"exchange": default_queue["exchange"], 319 "exchange_type": default_queue["exchange_type"], 320 "routing_key": conf.CELERY_DEFAULT_ROUTING_KEY, 321 "serializer": conf.CELERY_TASK_SERIALIZER, 322 "retry": conf.CELERY_TASK_PUBLISH_RETRY, 323 "retry_policy": conf.CELERY_TASK_PUBLISH_RETRY_POLICY, 324 "app": self} 325 return TaskPublisher(*args, **self.app.merge(defaults, kwargs)) 326 327 def get_task_consumer(self, connection, queues=None, **kwargs): 328 """Return consumer configured to consume from all known task 329 queues.""" 330 return self.ConsumerSet(connection, 331 from_dict=queues or self.queues.consume_from, 332 **kwargs) 333 334 def get_default_queue(self): 335 """Returns `(queue_name, queue_options)` tuple for the queue 336 configured to be default (:setting:`CELERY_DEFAULT_QUEUE`).""" 337 q = self.app.conf.CELERY_DEFAULT_QUEUE 338 return q, self.queues[q] 339 340 @cached_property 341 def queues(self): 342 """Queue nameβ‡’ declaration mapping.""" 343 return self.Queues(self.app.conf.CELERY_QUEUES) 344 345 @property 346 def routes(self): 347 if self._rtable is None: 348 self.flush_routes() 349 return self._rtable 350 351 @cached_property 352 def publisher_pool(self): 353 return PublisherPool(self.app) 354 [end of celery/app/amqp.py] [start of celery/app/base.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.app.base 4 ~~~~~~~~~~~~~~~ 5 6 Application Base Class. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 from __future__ import with_statement 14 15 import os 16 import platform as _platform 17 18 from contextlib import contextmanager 19 from copy import deepcopy 20 from functools import wraps 21 from threading import Lock 22 23 from .. import datastructures 24 from .. 
import platforms 25 from ..utils import cached_property, instantiate, lpmerge 26 27 from .defaults import DEFAULTS, find_deprecated_settings 28 29 import kombu 30 if kombu.VERSION < (1, 1, 0): 31 raise ImportError("Celery requires Kombu version 1.1.0 or higher.") 32 33 BUGREPORT_INFO = """ 34 platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s 35 software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s 36 settings -> transport:%(transport)s results:%(results)s 37 """ 38 39 40 class LamportClock(object): 41 """Lamport's logical clock. 42 43 From Wikipedia: 44 45 "A Lamport logical clock is a monotonically incrementing software counter 46 maintained in each process. It follows some simple rules: 47 48 * A process increments its counter before each event in that process; 49 * When a process sends a message, it includes its counter value with 50 the message; 51 * On receiving a message, the receiver process sets its counter to be 52 greater than the maximum of its own value and the received value 53 before it considers the message received. 54 55 Conceptually, this logical clock can be thought of as a clock that only 56 has meaning in relation to messages moving between processes. When a 57 process receives a message, it resynchronizes its logical clock with 58 the sender. 59 60 .. seealso:: 61 62 http://en.wikipedia.org/wiki/Lamport_timestamps 63 http://en.wikipedia.org/wiki/Lamport's_Distributed_ 64 Mutual_Exclusion_Algorithm 65 66 *Usage* 67 68 When sending a message use :meth:`forward` to increment the clock, 69 when receiving a message use :meth:`adjust` to sync with 70 the time stamp of the incoming message. 71 72 """ 73 #: The clocks current value. 
74 value = 0 75 76 def __init__(self, initial_value=0): 77 self.value = initial_value 78 self.mutex = Lock() 79 80 def adjust(self, other): 81 with self.mutex: 82 self.value = max(self.value, other) + 1 83 84 def forward(self): 85 with self.mutex: 86 self.value += 1 87 return self.value 88 89 90 class Settings(datastructures.ConfigurationView): 91 92 @property 93 def CELERY_RESULT_BACKEND(self): 94 """Resolves deprecated alias ``CELERY_BACKEND``.""" 95 return self.get("CELERY_RESULT_BACKEND") or self.get("CELERY_BACKEND") 96 97 @property 98 def BROKER_TRANSPORT(self): 99 """Resolves compat aliases :setting:`BROKER_BACKEND` 100 and :setting:`CARROT_BACKEND`.""" 101 return (self.get("BROKER_TRANSPORT") or 102 self.get("BROKER_BACKEND") or 103 self.get("CARROT_BACKEND")) 104 105 @property 106 def BROKER_BACKEND(self): 107 """Deprecated compat alias to :attr:`BROKER_TRANSPORT`.""" 108 return self.BROKER_TRANSPORT 109 110 @property 111 def BROKER_HOST(self): 112 113 return (os.environ.get("CELERY_BROKER_URL") or 114 self.get("BROKER_URL") or 115 self.get("BROKER_HOST")) 116 117 118 class BaseApp(object): 119 """Base class for apps.""" 120 SYSTEM = platforms.SYSTEM 121 IS_OSX = platforms.IS_OSX 122 IS_WINDOWS = platforms.IS_WINDOWS 123 124 amqp_cls = "celery.app.amqp.AMQP" 125 backend_cls = None 126 events_cls = "celery.events.Events" 127 loader_cls = "celery.loaders.app.AppLoader" 128 log_cls = "celery.log.Logging" 129 control_cls = "celery.task.control.Control" 130 131 _pool = None 132 133 def __init__(self, main=None, loader=None, backend=None, 134 amqp=None, events=None, log=None, control=None, 135 set_as_current=True, accept_magic_kwargs=False, **kwargs): 136 self.main = main 137 self.amqp_cls = amqp or self.amqp_cls 138 self.backend_cls = backend or self.backend_cls 139 self.events_cls = events or self.events_cls 140 self.loader_cls = loader or self.loader_cls 141 self.log_cls = log or self.log_cls 142 self.control_cls = control or self.control_cls 143 
self.set_as_current = set_as_current 144 self.accept_magic_kwargs = accept_magic_kwargs 145 self.clock = LamportClock() 146 147 self.on_init() 148 149 def on_init(self): 150 """Called at the end of the constructor.""" 151 pass 152 153 def config_from_object(self, obj, silent=False): 154 """Read configuration from object, where object is either 155 a object, or the name of a module to import. 156 157 >>> celery.config_from_object("myapp.celeryconfig") 158 159 >>> from myapp import celeryconfig 160 >>> celery.config_from_object(celeryconfig) 161 162 """ 163 del(self.conf) 164 return self.loader.config_from_object(obj, silent=silent) 165 166 def config_from_envvar(self, variable_name, silent=False): 167 """Read configuration from environment variable. 168 169 The value of the environment variable must be the name 170 of a module to import. 171 172 >>> os.environ["CELERY_CONFIG_MODULE"] = "myapp.celeryconfig" 173 >>> celery.config_from_envvar("CELERY_CONFIG_MODULE") 174 175 """ 176 del(self.conf) 177 return self.loader.config_from_envvar(variable_name, silent=silent) 178 179 def config_from_cmdline(self, argv, namespace="celery"): 180 """Read configuration from argv. 181 182 The config 183 184 """ 185 self.conf.update(self.loader.cmdline_config_parser(argv, namespace)) 186 187 def send_task(self, name, args=None, kwargs=None, countdown=None, 188 eta=None, task_id=None, publisher=None, connection=None, 189 connect_timeout=None, result_cls=None, expires=None, 190 queues=None, **options): 191 """Send task by name. 192 193 :param name: Name of task to execute (e.g. `"tasks.add"`). 194 :keyword result_cls: Specify custom result class. Default is 195 using :meth:`AsyncResult`. 196 197 Supports the same arguments as 198 :meth:`~celery.app.task.BaseTask.apply_async`. 
199 200 """ 201 router = self.amqp.Router(queues) 202 result_cls = result_cls or self.AsyncResult 203 204 options.setdefault("compression", 205 self.conf.CELERY_MESSAGE_COMPRESSION) 206 options = router.route(options, name, args, kwargs) 207 exchange = options.get("exchange") 208 exchange_type = options.get("exchange_type") 209 210 with self.default_connection(connection, connect_timeout) as conn: 211 publish = publisher or self.amqp.TaskPublisher(conn, 212 exchange=exchange, 213 exchange_type=exchange_type) 214 try: 215 new_id = publish.delay_task(name, args, kwargs, 216 task_id=task_id, 217 countdown=countdown, eta=eta, 218 expires=expires, **options) 219 finally: 220 publisher or publish.close() 221 return result_cls(new_id) 222 223 def AsyncResult(self, task_id, backend=None, task_name=None): 224 """Create :class:`celery.result.BaseAsyncResult` instance.""" 225 from ..result import BaseAsyncResult 226 return BaseAsyncResult(task_id, app=self, task_name=task_name, 227 backend=backend or self.backend) 228 229 def TaskSetResult(self, taskset_id, results, **kwargs): 230 """Create :class:`celery.result.TaskSetResult` instance.""" 231 from ..result import TaskSetResult 232 return TaskSetResult(taskset_id, results, app=self) 233 234 def broker_connection(self, hostname=None, userid=None, 235 password=None, virtual_host=None, port=None, ssl=None, 236 insist=None, connect_timeout=None, transport=None, 237 transport_options=None, **kwargs): 238 """Establish a connection to the message broker. 239 240 :keyword hostname: defaults to the :setting:`BROKER_HOST` setting. 241 :keyword userid: defaults to the :setting:`BROKER_USER` setting. 242 :keyword password: defaults to the :setting:`BROKER_PASSWORD` setting. 243 :keyword virtual_host: defaults to the :setting:`BROKER_VHOST` setting. 244 :keyword port: defaults to the :setting:`BROKER_PORT` setting. 245 :keyword ssl: defaults to the :setting:`BROKER_USE_SSL` setting. 
246 :keyword insist: defaults to the :setting:`BROKER_INSIST` setting. 247 :keyword connect_timeout: defaults to the 248 :setting:`BROKER_CONNECTION_TIMEOUT` setting. 249 :keyword backend_cls: defaults to the :setting:`BROKER_TRANSPORT` 250 setting. 251 252 :returns :class:`kombu.connection.BrokerConnection`: 253 254 """ 255 conf = self.conf 256 return self.amqp.BrokerConnection( 257 hostname or conf.BROKER_HOST, 258 userid or conf.BROKER_USER, 259 password or conf.BROKER_PASSWORD, 260 virtual_host or conf.BROKER_VHOST, 261 port or conf.BROKER_PORT, 262 transport=transport or conf.BROKER_TRANSPORT, 263 insist=self.either("BROKER_INSIST", insist), 264 ssl=self.either("BROKER_USE_SSL", ssl), 265 connect_timeout=self.either( 266 "BROKER_CONNECTION_TIMEOUT", connect_timeout), 267 transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS, 268 **transport_options or {})) 269 270 @contextmanager 271 def default_connection(self, connection=None, connect_timeout=None): 272 """For use within a with-statement to get a connection from the pool 273 if one is not already provided. 274 275 :keyword connection: If not provided, then a connection will be 276 acquired from the connection pool. 277 :keyword connect_timeout: *No longer used.* 278 279 """ 280 if connection: 281 yield connection 282 else: 283 with self.pool.acquire(block=True) as connection: 284 yield connection 285 286 def with_default_connection(self, fun): 287 """With any function accepting `connection` and `connect_timeout` 288 keyword arguments, establishes a default connection if one is 289 not already passed to it. 290 291 Any automatically established connection will be closed after 292 the function returns. 293 294 **Deprecated** 295 296 Use ``with app.default_connection(connection)`` instead. 

        """
        @wraps(fun)
        def _inner(*args, **kwargs):
            # Pop any caller-supplied connection; `default_connection`
            # falls back to the pool when it is None.
            connection = kwargs.pop("connection", None)
            with self.default_connection(connection) as c:
                return fun(*args, **dict(kwargs, connection=c))
        return _inner

    def prepare_config(self, c):
        """Prepare configuration before it is merged with the defaults."""
        # Emits PendingDeprecationWarning/DeprecationWarning for any
        # deprecated settings found in the user configuration.
        find_deprecated_settings(c)
        return c

    def mail_admins(self, subject, body, fail_silently=False):
        """Send an email to the admins in the :setting:`ADMINS` setting."""
        # No-op when ADMINS is empty; all SMTP parameters come from the
        # EMAIL_* settings and are handed to the loader's mailer.
        if self.conf.ADMINS:
            to = [admin_email for _, admin_email in self.conf.ADMINS]
            return self.loader.mail_admins(subject, body, fail_silently, to=to,
                                           sender=self.conf.SERVER_EMAIL,
                                           host=self.conf.EMAIL_HOST,
                                           port=self.conf.EMAIL_PORT,
                                           user=self.conf.EMAIL_HOST_USER,
                                           password=self.conf.EMAIL_HOST_PASSWORD,
                                           timeout=self.conf.EMAIL_TIMEOUT,
                                           use_ssl=self.conf.EMAIL_USE_SSL,
                                           use_tls=self.conf.EMAIL_USE_TLS)

    def either(self, default_key, *values):
        """Fallback to the value of a configuration key if none of the
        `*values` are true."""
        # Note: only None counts as "unset" here; False/0 are returned.
        for value in values:
            if value is not None:
                return value
        return self.conf.get(default_key)

    def merge(self, l, r):
        """Like `dict(a, **b)` except it will keep values from `a`
        if the value in `b` is :const:`None`."""
        return lpmerge(l, r)

    def _get_backend(self):
        # Resolve the result-backend class from `backend_cls` (constructor
        # argument) or the CELERY_RESULT_BACKEND setting, then instantiate
        # it bound to this app.
        from ..backends import get_backend_cls
        backend_cls = self.backend_cls or self.conf.CELERY_RESULT_BACKEND
        backend_cls = get_backend_cls(backend_cls, loader=self.loader)
        return backend_cls(app=self)

    def _get_config(self):
        # Layer the loader's configuration on top of a copy of the
        # defaults; `prepare_config` runs deprecation checks first.
        return Settings({}, [self.prepare_config(self.loader.conf),
                             deepcopy(DEFAULTS)])

    def _after_fork(self, obj_):
        # Registered via multiprocessing's after-fork hook (see `pool`):
        # force-close and drop the broker connection pool so child
        # processes do not share the parent's sockets.
        if self._pool:
            self._pool.force_close_all()
            self._pool = None

    def bugreport(self):
        # Collect version/platform/configuration info for bug reports.
        import celery
        import kombu
        return BUGREPORT_INFO % {"system": _platform.system(),
                                 "arch": _platform.architecture(),
                                 "py_i": platforms.pyimplementation(),
                                 "celery_v": celery.__version__,
                                 "kombu_v": kombu.__version__,
                                 "py_v": _platform.python_version(),
                                 "transport": self.conf.BROKER_TRANSPORT,
                                 "results": self.conf.CELERY_RESULT_BACKEND}

    @property
    def pool(self):
        # Broker connection pool, created lazily on first use and sized
        # by the BROKER_POOL_LIMIT setting.  The after-fork hook (if
        # multiprocessing is available) resets it in child processes.
        if self._pool is None:
            try:
                from multiprocessing.util import register_after_fork
                register_after_fork(self, self._after_fork)
            except ImportError:
                pass
            limit = self.conf.BROKER_POOL_LIMIT
            self._pool = self.broker_connection().Pool(limit)
        return self._pool

    @cached_property
    def amqp(self):
        """Sending/receiving messages. See :class:`~celery.app.amqp.AMQP`."""
        return instantiate(self.amqp_cls, app=self)

    @cached_property
    def backend(self):
        """Storing/retrieving task state. See
        :class:`~celery.backend.base.BaseBackend`."""
        return self._get_backend()

    @cached_property
    def conf(self):
        """Current configuration (dict and attribute access)."""
        return self._get_config()

    @cached_property
    def control(self):
        """Controlling worker nodes. See
        :class:`~celery.task.control.Control`."""
        return instantiate(self.control_cls, app=self)

    @cached_property
    def events(self):
        """Sending/receiving events. See :class:`~celery.events.Events`. """
        return instantiate(self.events_cls, app=self)

    @cached_property
    def loader(self):
        """Current loader."""
        from ..loaders import get_loader_cls
        return get_loader_cls(self.loader_cls)(app=self)

    @cached_property
    def log(self):
        """Logging utilities. See :class:`~celery.log.Logging`."""
        return instantiate(self.log_cls, app=self)
[end of celery/app/base.py]
[start of celery/app/defaults.py]
# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~

Configuration introspection and defaults.

:copyright: (c) 2009 - 2011 by Ask Solem.
9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 import sys 15 16 from collections import deque 17 from datetime import timedelta 18 19 is_jython = sys.platform.startswith("java") 20 is_pypy = hasattr(sys, "pypy_version_info") 21 22 DEFAULT_POOL = "processes" 23 if is_jython: 24 DEFAULT_POOL = "threads" 25 elif is_pypy: 26 if sys.pypy_version_info[0:3] < (1, 5, 0): 27 DEFAULT_POOL = "solo" 28 else: 29 DEFAULT_POOL = "processes" 30 31 32 DEFAULT_PROCESS_LOG_FMT = """ 33 [%(asctime)s: %(levelname)s/%(processName)s] %(message)s 34 """.strip() 35 DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s' 36 DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ 37 %(task_name)s[%(task_id)s]: %(message)s""" 38 39 40 def str_to_bool(term, table={"false": False, "no": False, "0": False, 41 "true": True, "yes": True, "1": True}): 42 try: 43 return table[term.lower()] 44 except KeyError: 45 raise TypeError("%r can not be converted to type bool" % (term, )) 46 47 48 class Option(object): 49 alt = None 50 deprecate_by = None 51 remove_by = None 52 typemap = dict(string=str, int=int, float=float, any=lambda v: v, 53 bool=str_to_bool, dict=dict, tuple=tuple) 54 55 def __init__(self, default=None, *args, **kwargs): 56 self.default = default 57 self.type = kwargs.get("type") or "string" 58 for attr, value in kwargs.iteritems(): 59 setattr(self, attr, value) 60 61 def to_python(self, value): 62 return self.typemap[self.type](value) 63 64 65 NAMESPACES = { 66 "BROKER": { 67 "URL": Option(None, type="string"), 68 "HOST": Option(None, type="string"), 69 "PORT": Option(type="int"), 70 "USER": Option(None, type="string"), 71 "PASSWORD": Option(None, type="string"), 72 "VHOST": Option(None, type="string"), 73 "CONNECTION_TIMEOUT": Option(4, type="int"), 74 "CONNECTION_RETRY": Option(True, type="bool"), 75 "CONNECTION_MAX_RETRIES": Option(100, type="int"), 76 "POOL_LIMIT": Option(None, type="int"), 77 "INSIST": 
Option(False, type="bool", 78 deprecate_by="2.4", remove_by="3.0"), 79 "USE_SSL": Option(False, type="bool"), 80 "TRANSPORT": Option(None, type="string"), 81 "TRANSPORT_OPTIONS": Option({}, type="dict"), 82 }, 83 "CELERY": { 84 "ACKS_LATE": Option(False, type="bool"), 85 "ALWAYS_EAGER": Option(False, type="bool"), 86 "AMQP_TASK_RESULT_EXPIRES": Option(type="int", 87 deprecate_by="2.5", remove_by="3.0", 88 alt="CELERY_TASK_RESULT_EXPIRES"), 89 "AMQP_TASK_RESULT_CONNECTION_MAX": Option(1, type="int", 90 remove_by="2.5", alt="BROKER_POOL_LIMIT"), 91 "BROADCAST_QUEUE": Option("celeryctl"), 92 "BROADCAST_EXCHANGE": Option("celeryctl"), 93 "BROADCAST_EXCHANGE_TYPE": Option("fanout"), 94 "CACHE_BACKEND": Option(), 95 "CACHE_BACKEND_OPTIONS": Option({}, type="dict"), 96 "CREATE_MISSING_QUEUES": Option(True, type="bool"), 97 "DEFAULT_RATE_LIMIT": Option(type="string"), 98 "DISABLE_RATE_LIMITS": Option(False, type="bool"), 99 "DEFAULT_ROUTING_KEY": Option("celery"), 100 "DEFAULT_QUEUE": Option("celery"), 101 "DEFAULT_EXCHANGE": Option("celery"), 102 "DEFAULT_EXCHANGE_TYPE": Option("direct"), 103 "DEFAULT_DELIVERY_MODE": Option(2, type="string"), 104 "EAGER_PROPAGATES_EXCEPTIONS": Option(False, type="bool"), 105 "EVENT_SERIALIZER": Option("json"), 106 "IMPORTS": Option((), type="tuple"), 107 "IGNORE_RESULT": Option(False, type="bool"), 108 "MAX_CACHED_RESULTS": Option(5000, type="int"), 109 "MESSAGE_COMPRESSION": Option(None, type="string"), 110 "MONGODB_BACKEND_SETTINGS": Option(None, type="dict"), 111 "REDIS_HOST": Option(None, type="string"), 112 "REDIS_PORT": Option(None, type="int"), 113 "REDIS_DB": Option(None, type="int"), 114 "REDIS_PASSWORD": Option(None, type="string"), 115 "RESULT_BACKEND": Option(None, type="string"), 116 "RESULT_DB_SHORT_LIVED_SESSIONS": Option(False, type="bool"), 117 "RESULT_DBURI": Option(), 118 "RESULT_ENGINE_OPTIONS": Option(None, type="dict"), 119 "RESULT_EXCHANGE": Option("celeryresults"), 120 "RESULT_EXCHANGE_TYPE": Option("direct"), 121 
"RESULT_SERIALIZER": Option("pickle"), 122 "RESULT_PERSISTENT": Option(False, type="bool"), 123 "ROUTES": Option(None, type="any"), 124 "SEND_EVENTS": Option(False, type="bool"), 125 "SEND_TASK_ERROR_EMAILS": Option(False, type="bool"), 126 "SEND_TASK_SENT_EVENT": Option(False, type="bool"), 127 "STORE_ERRORS_EVEN_IF_IGNORED": Option(False, type="bool"), 128 "TASK_ERROR_WHITELIST": Option((), type="tuple", 129 deprecate_by="2.5", remove_by="3.0"), 130 "TASK_PUBLISH_RETRY": Option(True, type="bool"), 131 "TASK_PUBLISH_RETRY_POLICY": Option({ 132 "max_retries": 100, 133 "interval_start": 0, 134 "interval_max": 1, 135 "interval_step": 0.2}, type="dict"), 136 "TASK_RESULT_EXPIRES": Option(timedelta(days=1), type="int"), 137 "TASK_SERIALIZER": Option("pickle"), 138 "TRACK_STARTED": Option(False, type="bool"), 139 "REDIRECT_STDOUTS": Option(True, type="bool"), 140 "REDIRECT_STDOUTS_LEVEL": Option("WARNING"), 141 "QUEUES": Option(None, type="dict"), 142 }, 143 "CELERYD": { 144 "AUTOSCALER": Option("celery.worker.autoscale.Autoscaler"), 145 "CONCURRENCY": Option(0, type="int"), 146 "ETA_SCHEDULER": Option(None, type="str"), 147 "ETA_SCHEDULER_PRECISION": Option(1.0, type="float"), 148 "HIJACK_ROOT_LOGGER": Option(True, type="bool"), 149 "CONSUMER": Option("celery.worker.consumer.Consumer"), 150 "LOG_FORMAT": Option(DEFAULT_PROCESS_LOG_FMT), 151 "LOG_COLOR": Option(type="bool"), 152 "LOG_LEVEL": Option("WARN", deprecate_by="2.4", remove_by="3.0", 153 alt="--loglevel argument"), 154 "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"), 155 "MEDIATOR": Option("celery.worker.mediator.Mediator"), 156 "MAX_TASKS_PER_CHILD": Option(type="int"), 157 "POOL": Option(DEFAULT_POOL), 158 "POOL_PUTLOCKS": Option(True, type="bool"), 159 "PREFETCH_MULTIPLIER": Option(4, type="int"), 160 "STATE_DB": Option(), 161 "TASK_LOG_FORMAT": Option(DEFAULT_TASK_LOG_FMT), 162 "TASK_SOFT_TIME_LIMIT": Option(type="int"), 163 "TASK_TIME_LIMIT": Option(type="int"), 164 }, 165 "CELERYBEAT": { 166 
"SCHEDULE": Option({}, type="dict"), 167 "SCHEDULER": Option("celery.beat.PersistentScheduler"), 168 "SCHEDULE_FILENAME": Option("celerybeat-schedule"), 169 "MAX_LOOP_INTERVAL": Option(5 * 60, type="int"), 170 "LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"), 171 "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"), 172 }, 173 "CELERYMON": { 174 "LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"), 175 "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"), 176 "LOG_FORMAT": Option(DEFAULT_LOG_FMT), 177 }, 178 "EMAIL": { 179 "HOST": Option("localhost"), 180 "PORT": Option(25, type="int"), 181 "HOST_USER": Option(None), 182 "HOST_PASSWORD": Option(None), 183 "TIMEOUT": Option(2, type="int"), 184 "USE_SSL": Option(False, type="bool"), 185 "USE_TLS": Option(False, type="bool"), 186 }, 187 "SERVER_EMAIL": Option("celery@localhost"), 188 "ADMINS": Option((), type="tuple"), 189 "TT": { 190 "HOST": Option(None, type="string"), 191 "PORT": Option(None, type="int"), 192 }, 193 } 194 195 196 def flatten(d, ns=""): 197 stack = deque([(ns, d)]) 198 while stack: 199 name, space = stack.popleft() 200 for key, value in space.iteritems(): 201 if isinstance(value, dict): 202 stack.append((name + key + '_', value)) 203 else: 204 yield name + key, value 205 206 207 def find_deprecated_settings(source): 208 from celery.utils import warn_deprecated 209 for name, opt in flatten(NAMESPACES): 210 if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): 211 warn_deprecated(description="The %r setting" % (name, ), 212 deprecation=opt.deprecate_by, 213 removal=opt.remove_by, 214 alternative=opt.alt) 215 216 217 DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES)) 218 [end of celery/app/defaults.py] [start of celery/app/task/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.app.task 4 ~~~~~~~~~~~~~~~ 5 6 Tasks Implementation. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 

"""

from __future__ import absolute_import

import sys
import threading

from ...datastructures import ExceptionInfo
from ...exceptions import MaxRetriesExceededError, RetryTaskError
from ...execute.trace import TaskTrace
from ...registry import tasks, _unpickle_task
from ...result import EagerResult
from ...utils import fun_takes_kwargs, mattrgetter, uuid
from ...utils.mail import ErrorMail

# Extracts the message routing/delivery options from a task instance;
# used by `apply_async` to seed the publish options.
extract_exec_options = mattrgetter("queue", "routing_key",
                                   "exchange", "immediate",
                                   "mandatory", "priority",
                                   "serializer", "delivery_mode",
                                   "compression")


class Context(threading.local):
    """Thread-local execution context of the currently running task
    (set by the worker before the task body executes)."""
    # Default context
    logfile = None
    loglevel = None
    hostname = None
    id = None
    args = None
    kwargs = None
    retries = 0
    is_eager = False
    delivery_info = None
    taskset = None
    chord = None
    called_directly = True

    def update(self, d, **kwargs):
        """Update the context from mapping `d` and extra keywords."""
        self.__dict__.update(d, **kwargs)

    def clear(self):
        """Reset the context for this thread back to the class defaults."""
        self.__dict__.clear()

    def get(self, key, default=None):
        """Return attribute `key`, or `default` if it is not set."""
        try:
            return getattr(self, key)
        except AttributeError:
            return default


class TaskType(type):
    """Meta class for tasks.

    Automatically registers the task in the task registry, except
    if the `abstract` attribute is set.

    If no `name` attribute is provided, then the name is automatically
    set to the name of the module it was defined in, and the class name.

    """

    def __new__(cls, name, bases, attrs):
        new = super(TaskType, cls).__new__
        task_module = attrs.get("__module__") or "__main__"

        # Abstract class: abstract attribute should not be inherited.
        if attrs.pop("abstract", None) or not attrs.get("autoregister", True):
            return new(cls, name, bases, attrs)

        # Automatically generate missing/empty name.
        autoname = False
        if not attrs.get("name"):
            try:
                module_name = sys.modules[task_module].__name__
            except KeyError:  # pragma: no cover
                # Fix for manage.py shell_plus (Issue #366).
                module_name = task_module
            attrs["name"] = '.'.join([module_name, name])
            autoname = True

        # Because of the way import happens (recursively)
        # we may or may not be the first time the task tries to register
        # with the framework. There should only be one class for each task
        # name, so we always return the registered version.
        task_name = attrs["name"]
        if task_name not in tasks:
            task_cls = new(cls, name, bases, attrs)
            # Tasks auto-named in __main__ are re-qualified with the app's
            # main module name so they get a stable, importable name.
            if autoname and task_module == "__main__" and task_cls.app.main:
                task_name = task_cls.name = '.'.join([task_cls.app.main, name])
            tasks.register(task_cls)
        task = tasks[task_name].__class__
        return task

    def __repr__(cls):
        return "<class Task of %s>" % (cls.app, )


class BaseTask(object):
    """Task base class.

    When called tasks apply the :meth:`run` method. This method must
    be defined by all tasks (that is unless the :meth:`__call__` method
    is overridden).

    """
    __metaclass__ = TaskType

    ErrorMail = ErrorMail
    MaxRetriesExceededError = MaxRetriesExceededError

    #: The application instance associated with this task class.
    app = None

    #: Name of the task.
    name = None

    #: If :const:`True` the task is an abstract base class.
    abstract = True

    #: If disabled the worker will not forward magic keyword arguments.
    #: Deprecated and scheduled for removal in v3.0.
    accept_magic_kwargs = False

    #: Request context (set when task is applied).
    request = Context()

    #: Destination queue. The queue needs to exist
    #: in :setting:`CELERY_QUEUES`. The `routing_key`, `exchange` and
    #: `exchange_type` attributes will be ignored if this is set.
140 queue = None 141 142 #: Overrides the apps default `routing_key` for this task. 143 routing_key = None 144 145 #: Overrides the apps default `exchange` for this task. 146 exchange = None 147 148 #: Overrides the apps default exchange type for this task. 149 exchange_type = None 150 151 #: Override the apps default delivery mode for this task. Default is 152 #: `"persistent"`, but you can change this to `"transient"`, which means 153 #: messages will be lost if the broker is restarted. Consult your broker 154 #: manual for any additional delivery modes. 155 delivery_mode = None 156 157 #: Mandatory message routing. 158 mandatory = False 159 160 #: Request immediate delivery. 161 immediate = False 162 163 #: Default message priority. A number between 0 to 9, where 0 is the 164 #: highest. Note that RabbitMQ does not support priorities. 165 priority = None 166 167 #: Maximum number of retries before giving up. If set to :const:`None`, 168 #: it will **never** stop retrying. 169 max_retries = 3 170 171 #: Default time in seconds before a retry of the task should be 172 #: executed. 3 minutes by default. 173 default_retry_delay = 3 * 60 174 175 #: Rate limit for this task type. Examples: :const:`None` (no rate 176 #: limit), `"100/s"` (hundred tasks a second), `"100/m"` (hundred tasks 177 #: a minute),`"100/h"` (hundred tasks an hour) 178 rate_limit = None 179 180 #: If enabled the worker will not store task state and return values 181 #: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT` 182 #: setting. 183 ignore_result = False 184 185 #: When enabled errors will be stored even if the task is otherwise 186 #: configured to ignore results. 187 store_errors_even_if_ignored = False 188 189 #: If enabled an email will be sent to :setting:`ADMINS` whenever a task 190 #: of this type fails. 191 send_error_emails = False 192 disable_error_emails = False # FIXME 193 194 #: List of exception types to send error emails for. 
195 error_whitelist = () 196 197 #: The name of a serializer that are registered with 198 #: :mod:`kombu.serialization.registry`. Default is `"pickle"`. 199 serializer = "pickle" 200 201 #: Hard time limit. 202 #: Defaults to the :setting:`CELERY_TASK_TIME_LIMIT` setting. 203 time_limit = None 204 205 #: Soft time limit. 206 #: Defaults to the :setting:`CELERY_TASK_SOFT_TIME_LIMIT` setting. 207 soft_time_limit = None 208 209 #: The result store backend used for this task. 210 backend = None 211 212 #: If disabled this task won't be registered automatically. 213 autoregister = True 214 215 #: If enabled the task will report its status as "started" when the task 216 #: is executed by a worker. Disabled by default as the normal behaviour 217 #: is to not report that level of granularity. Tasks are either pending, 218 #: finished, or waiting to be retried. 219 #: 220 #: Having a "started" status can be useful for when there are long 221 #: running tasks and there is a need to report which task is currently 222 #: running. 223 #: 224 #: The application default can be overridden using the 225 #: :setting:`CELERY_TRACK_STARTED` setting. 226 track_started = False 227 228 #: When enabled messages for this task will be acknowledged **after** 229 #: the task has been executed, and not *just before* which is the 230 #: default behavior. 231 #: 232 #: Please note that this means the task may be executed twice if the 233 #: worker crashes mid execution (which may be acceptable for some 234 #: applications). 235 #: 236 #: The application default can be overridden with the 237 #: :setting:`CELERY_ACKS_LATE` setting. 238 acks_late = False 239 240 #: Default task expiry time. 241 expires = None 242 243 #: The type of task *(no longer used)*. 
244 type = "regular" 245 246 def __call__(self, *args, **kwargs): 247 return self.run(*args, **kwargs) 248 249 def __reduce__(self): 250 return (_unpickle_task, (self.name, ), None) 251 252 def run(self, *args, **kwargs): 253 """The body of the task executed by workers.""" 254 raise NotImplementedError("Tasks must define the run method.") 255 256 @classmethod 257 def get_logger(self, loglevel=None, logfile=None, propagate=False, 258 **kwargs): 259 """Get task-aware logger object.""" 260 return self.app.log.setup_task_logger( 261 loglevel=self.request.loglevel if loglevel is None else loglevel, 262 logfile=self.request.logfile if logfile is None else logfile, 263 propagate=propagate, task_name=self.name, task_id=self.request.id) 264 265 @classmethod 266 def establish_connection(self, connect_timeout=None): 267 """Establish a connection to the message broker.""" 268 return self.app.broker_connection(connect_timeout=connect_timeout) 269 270 @classmethod 271 def get_publisher(self, connection=None, exchange=None, 272 connect_timeout=None, exchange_type=None, **options): 273 """Get a celery task message publisher. 274 275 :rtype :class:`~celery.app.amqp.TaskPublisher`: 276 277 .. warning:: 278 279 If you don't specify a connection, one will automatically 280 be established for you, in that case you need to close this 281 connection after use:: 282 283 >>> publisher = self.get_publisher() 284 >>> # ... do something with publisher 285 >>> publisher.connection.close() 286 287 or used as a context:: 288 289 >>> with self.get_publisher() as publisher: 290 ... # ... 
do something with publisher 291 292 """ 293 exchange = self.exchange if exchange is None else exchange 294 if exchange_type is None: 295 exchange_type = self.exchange_type 296 connection = connection or self.establish_connection(connect_timeout) 297 return self.app.amqp.TaskPublisher(connection=connection, 298 exchange=exchange, 299 exchange_type=exchange_type, 300 routing_key=self.routing_key, 301 **options) 302 303 @classmethod 304 def get_consumer(self, connection=None, connect_timeout=None): 305 """Get message consumer. 306 307 :rtype :class:`kombu.messaging.Consumer`: 308 309 .. warning:: 310 311 If you don't specify a connection, one will automatically 312 be established for you, in that case you need to close this 313 connection after use:: 314 315 >>> consumer = self.get_consumer() 316 >>> # do something with consumer 317 >>> consumer.close() 318 >>> consumer.connection.close() 319 320 """ 321 connection = connection or self.establish_connection(connect_timeout) 322 return self.app.amqp.TaskConsumer(connection=connection, 323 exchange=self.exchange, 324 routing_key=self.routing_key) 325 326 @classmethod 327 def delay(self, *args, **kwargs): 328 """Star argument version of :meth:`apply_async`. 329 330 Does not support the extra options enabled by :meth:`apply_async`. 331 332 :param \*args: positional arguments passed on to the task. 333 :param \*\*kwargs: keyword arguments passed on to the task. 334 335 :returns :class:`celery.result.AsyncResult`: 336 337 """ 338 return self.apply_async(args, kwargs) 339 340 @classmethod 341 def apply_async(self, args=None, kwargs=None, countdown=None, 342 eta=None, task_id=None, publisher=None, connection=None, 343 connect_timeout=None, router=None, expires=None, queues=None, 344 **options): 345 """Apply tasks asynchronously by sending a message. 346 347 :keyword args: The positional arguments to pass on to the 348 task (a :class:`list` or :class:`tuple`). 
349 350 :keyword kwargs: The keyword arguments to pass on to the 351 task (a :class:`dict`) 352 353 :keyword countdown: Number of seconds into the future that the 354 task should execute. Defaults to immediate 355 execution (do not confuse with the 356 `immediate` flag, as they are unrelated). 357 358 :keyword eta: A :class:`~datetime.datetime` object describing 359 the absolute time and date of when the task should 360 be executed. May not be specified if `countdown` 361 is also supplied. (Do not confuse this with the 362 `immediate` flag, as they are unrelated). 363 364 :keyword expires: Either a :class:`int`, describing the number of 365 seconds, or a :class:`~datetime.datetime` object 366 that describes the absolute time and date of when 367 the task should expire. The task will not be 368 executed after the expiration time. 369 370 :keyword connection: Re-use existing broker connection instead 371 of establishing a new one. The `connect_timeout` 372 argument is not respected if this is set. 373 374 :keyword connect_timeout: The timeout in seconds, before we give up 375 on establishing a connection to the AMQP 376 server. 377 378 :keyword retry: If enabled sending of the task message will be retried 379 in the event of connection loss or failure. Default 380 is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` 381 setting. Note you need to handle the 382 publisher/connection manually for this to work. 383 384 :keyword retry_policy: Override the retry policy used. See the 385 :setting:`CELERY_TASK_PUBLISH_RETRY` setting. 386 387 :keyword routing_key: The routing key used to route the task to a 388 worker server. Defaults to the 389 :attr:`routing_key` attribute. 390 391 :keyword exchange: The named exchange to send the task to. 392 Defaults to the :attr:`exchange` attribute. 393 394 :keyword exchange_type: The exchange type to initialize the exchange 395 if not already declared. Defaults to the 396 :attr:`exchange_type` attribute. 
397 398 :keyword immediate: Request immediate delivery. Will raise an 399 exception if the task cannot be routed to a worker 400 immediately. (Do not confuse this parameter with 401 the `countdown` and `eta` settings, as they are 402 unrelated). Defaults to the :attr:`immediate` 403 attribute. 404 405 :keyword mandatory: Mandatory routing. Raises an exception if 406 there's no running workers able to take on this 407 task. Defaults to the :attr:`mandatory` 408 attribute. 409 410 :keyword priority: The task priority, a number between 0 and 9. 411 Defaults to the :attr:`priority` attribute. 412 413 :keyword serializer: A string identifying the default 414 serialization method to use. Can be `pickle`, 415 `json`, `yaml`, `msgpack` or any custom 416 serialization method that has been registered 417 with :mod:`kombu.serialization.registry`. 418 Defaults to the :attr:`serializer` attribute. 419 420 :keyword compression: A string identifying the compression method 421 to use. Can be one of ``zlib``, ``bzip2``, 422 or any custom compression methods registered with 423 :func:`kombu.compression.register`. Defaults to 424 the :setting:`CELERY_MESSAGE_COMPRESSION` 425 setting. 426 427 .. note:: 428 If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will 429 be replaced by a local :func:`apply` call instead. 
430 431 """ 432 router = self.app.amqp.Router(queues) 433 conf = self.app.conf 434 435 if conf.CELERY_ALWAYS_EAGER: 436 return self.apply(args, kwargs, task_id=task_id) 437 438 options.setdefault("compression", 439 conf.CELERY_MESSAGE_COMPRESSION) 440 options = dict(extract_exec_options(self), **options) 441 options = router.route(options, self.name, args, kwargs) 442 expires = expires or self.expires 443 444 publish = publisher or self.app.amqp.publisher_pool.acquire(block=True) 445 evd = None 446 if conf.CELERY_SEND_TASK_SENT_EVENT: 447 evd = self.app.events.Dispatcher(channel=publish.channel, 448 buffer_while_offline=False) 449 450 try: 451 task_id = publish.delay_task(self.name, args, kwargs, 452 task_id=task_id, 453 countdown=countdown, 454 eta=eta, expires=expires, 455 event_dispatcher=evd, 456 **options) 457 finally: 458 if not publisher: 459 publish.release() 460 461 return self.AsyncResult(task_id) 462 463 @classmethod 464 def retry(self, args=None, kwargs=None, exc=None, throw=True, 465 eta=None, countdown=None, max_retries=None, **options): 466 """Retry the task. 467 468 :param args: Positional arguments to retry with. 469 :param kwargs: Keyword arguments to retry with. 470 :keyword exc: Optional exception to raise instead of 471 :exc:`~celery.exceptions.MaxRetriesExceededError` 472 when the max restart limit has been exceeded. 473 :keyword countdown: Time in seconds to delay the retry for. 474 :keyword eta: Explicit time and date to run the retry at 475 (must be a :class:`~datetime.datetime` instance). 476 :keyword max_retries: If set, overrides the default retry limit. 477 :keyword \*\*options: Any extra options to pass on to 478 meth:`apply_async`. 479 :keyword throw: If this is :const:`False`, do not raise the 480 :exc:`~celery.exceptions.RetryTaskError` exception, 481 that tells the worker to mark the task as being 482 retried. 
Note that this means the task will be 483 marked as failed if the task raises an exception, 484 or successful if it returns. 485 486 :raises celery.exceptions.RetryTaskError: To tell the worker that 487 the task has been re-sent for retry. This always happens, 488 unless the `throw` keyword argument has been explicitly set 489 to :const:`False`, and is considered normal operation. 490 491 **Example** 492 493 .. code-block:: python 494 495 >>> @task 496 >>> def tweet(auth, message): 497 ... twitter = Twitter(oauth=auth) 498 ... try: 499 ... twitter.post_status_update(message) 500 ... except twitter.FailWhale, exc: 501 ... # Retry in 5 minutes. 502 ... return tweet.retry(countdown=60 * 5, exc=exc) 503 504 Although the task will never return above as `retry` raises an 505 exception to notify the worker, we use `return` in front of the retry 506 to convey that the rest of the block will not be executed. 507 508 """ 509 request = self.request 510 max_retries = self.max_retries if max_retries is None else max_retries 511 args = request.args if args is None else args 512 kwargs = request.kwargs if kwargs is None else kwargs 513 delivery_info = request.delivery_info 514 515 # Not in worker or emulated by (apply/always_eager), 516 # so just raise the original exception. 
517 if request.called_directly: 518 raise exc or RetryTaskError("Task can be retried", None) 519 520 if delivery_info: 521 options.setdefault("exchange", delivery_info.get("exchange")) 522 options.setdefault("routing_key", delivery_info.get("routing_key")) 523 524 if not eta and countdown is None: 525 countdown = self.default_retry_delay 526 527 options.update({"retries": request.retries + 1, 528 "task_id": request.id, 529 "countdown": countdown, 530 "eta": eta}) 531 532 if max_retries is not None and options["retries"] > max_retries: 533 raise exc or self.MaxRetriesExceededError( 534 "Can't retry %s[%s] args:%s kwargs:%s" % ( 535 self.name, options["task_id"], args, kwargs)) 536 537 # If task was executed eagerly using apply(), 538 # then the retry must also be executed eagerly. 539 if request.is_eager: 540 return self.apply(args=args, kwargs=kwargs, **options).get() 541 542 self.apply_async(args=args, kwargs=kwargs, **options) 543 if throw: 544 raise RetryTaskError( 545 eta and "Retry at %s" % (eta, ) 546 or "Retry in %s secs." % (countdown, ), exc) 547 548 @classmethod 549 def apply(self, args=None, kwargs=None, **options): 550 """Execute this task locally, by blocking until the task returns. 551 552 :param args: positional arguments passed on to the task. 553 :param kwargs: keyword arguments passed on to the task. 554 :keyword throw: Re-raise task exceptions. Defaults to 555 the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` 556 setting. 557 558 :rtype :class:`celery.result.EagerResult`: 559 560 """ 561 args = args or [] 562 kwargs = kwargs or {} 563 task_id = options.get("task_id") or uuid() 564 retries = options.get("retries", 0) 565 throw = self.app.either("CELERY_EAGER_PROPAGATES_EXCEPTIONS", 566 options.pop("throw", None)) 567 568 # Make sure we get the task instance, not class. 
569 task = tasks[self.name] 570 571 request = {"id": task_id, 572 "retries": retries, 573 "is_eager": True, 574 "logfile": options.get("logfile"), 575 "loglevel": options.get("loglevel", 0), 576 "delivery_info": {"is_eager": True}} 577 if self.accept_magic_kwargs: 578 default_kwargs = {"task_name": task.name, 579 "task_id": task_id, 580 "task_retries": retries, 581 "task_is_eager": True, 582 "logfile": options.get("logfile"), 583 "loglevel": options.get("loglevel", 0), 584 "delivery_info": {"is_eager": True}} 585 supported_keys = fun_takes_kwargs(task.run, default_kwargs) 586 extend_with = dict((key, val) 587 for key, val in default_kwargs.items() 588 if key in supported_keys) 589 kwargs.update(extend_with) 590 591 trace = TaskTrace(task.name, task_id, args, kwargs, 592 task=task, request=request, propagate=throw) 593 retval = trace.execute() 594 if isinstance(retval, ExceptionInfo): 595 retval = retval.exception 596 return EagerResult(task_id, retval, trace.status, 597 traceback=trace.strtb) 598 599 @classmethod 600 def AsyncResult(self, task_id): 601 """Get AsyncResult instance for this kind of task. 602 603 :param task_id: Task id to get result for. 604 605 """ 606 return self.app.AsyncResult(task_id, backend=self.backend, 607 task_name=self.name) 608 609 def update_state(self, task_id=None, state=None, meta=None): 610 """Update task state. 611 612 :param task_id: Id of the task to update. 613 :param state: New state (:class:`str`). 614 :param meta: State metadata (:class:`dict`). 615 616 """ 617 if task_id is None: 618 task_id = self.request.id 619 self.backend.store_result(task_id, meta, state) 620 621 def on_retry(self, exc, task_id, args, kwargs, einfo): 622 """Retry handler. 623 624 This is run by the worker when the task is to be retried. 625 626 :param exc: The exception sent to :meth:`retry`. 627 :param task_id: Unique id of the retried task. 628 :param args: Original arguments for the retried task. 
629 :param kwargs: Original keyword arguments for the retried task. 630 631 :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` 632 instance, containing the traceback. 633 634 The return value of this handler is ignored. 635 636 """ 637 pass 638 639 def after_return(self, status, retval, task_id, args, kwargs, einfo): 640 """Handler called after the task returns. 641 642 :param status: Current task state. 643 :param retval: Task return value/exception. 644 :param task_id: Unique id of the task. 645 :param args: Original arguments for the task that failed. 646 :param kwargs: Original keyword arguments for the task 647 that failed. 648 649 :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` 650 instance, containing the traceback (if any). 651 652 The return value of this handler is ignored. 653 654 """ 655 if self.request.chord: 656 self.backend.on_chord_part_return(self) 657 658 def on_failure(self, exc, task_id, args, kwargs, einfo): 659 """Error handler. 660 661 This is run by the worker when the task fails. 662 663 :param exc: The exception raised by the task. 664 :param task_id: Unique id of the failed task. 665 :param args: Original arguments for the task that failed. 666 :param kwargs: Original keyword arguments for the task 667 that failed. 668 669 :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` 670 instance, containing the traceback. 671 672 The return value of this handler is ignored. 673 674 """ 675 pass 676 677 def send_error_email(self, context, exc, **kwargs): 678 if self.send_error_emails and not self.disable_error_emails: 679 sender = self.ErrorMail(self, **kwargs) 680 sender.send(context, exc) 681 682 def on_success(self, retval, task_id, args, kwargs): 683 """Success handler. 684 685 Run by the worker if the task executes successfully. 686 687 :param retval: The return value of the task. 688 :param task_id: Unique id of the executed task. 689 :param args: Original arguments for the executed task. 
690 :param kwargs: Original keyword arguments for the executed task. 691 692 The return value of this handler is ignored. 693 694 """ 695 pass 696 697 def execute(self, request, pool, loglevel, logfile, **kwargs): 698 """The method the worker calls to execute the task. 699 700 :param request: A :class:`~celery.worker.job.TaskRequest`. 701 :param pool: A task pool. 702 :param loglevel: Current loglevel. 703 :param logfile: Name of the currently used logfile. 704 705 :keyword consumer: The :class:`~celery.worker.consumer.Consumer`. 706 707 """ 708 request.execute_using_pool(pool, loglevel, logfile) 709 710 def __repr__(self): 711 """`repr(task)`""" 712 return "<@task: %s>" % (self.name, ) 713 714 @classmethod 715 def subtask(cls, *args, **kwargs): 716 """Returns :class:`~celery.task.sets.subtask` object for 717 this task, wrapping arguments and execution options 718 for a single task invocation.""" 719 from ...task.sets import subtask 720 return subtask(cls, *args, **kwargs) 721 722 @property 723 def __name__(self): 724 return self.__class__.__name__ 725 [end of celery/app/task/__init__.py] [start of celery/apps/beat.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 import atexit 5 import socket 6 import sys 7 import traceback 8 9 from .. import __version__, platforms 10 from .. import beat 11 from ..app import app_or_default 12 from ..utils import get_full_cls_name, LOG_LEVELS 13 from ..utils.timeutils import humanize_seconds 14 15 STARTUP_INFO_FMT = """ 16 Configuration -> 17 . broker -> %(conninfo)s 18 . loader -> %(loader)s 19 . scheduler -> %(scheduler)s 20 %(scheduler_info)s 21 . logfile -> %(logfile)s@%(loglevel)s 22 . 
maxinterval -> %(hmax_interval)s (%(max_interval)ss) 23 """.strip() 24 25 26 class Beat(object): 27 Service = beat.Service 28 29 def __init__(self, loglevel=None, logfile=None, schedule=None, 30 max_interval=None, scheduler_cls=None, app=None, 31 socket_timeout=30, redirect_stdouts=None, 32 redirect_stdouts_level=None, pidfile=None, **kwargs): 33 """Starts the celerybeat task scheduler.""" 34 self.app = app = app_or_default(app) 35 36 self.loglevel = loglevel or app.conf.CELERYBEAT_LOG_LEVEL 37 self.logfile = logfile or app.conf.CELERYBEAT_LOG_FILE 38 self.schedule = schedule or app.conf.CELERYBEAT_SCHEDULE_FILENAME 39 self.scheduler_cls = scheduler_cls or app.conf.CELERYBEAT_SCHEDULER 40 self.max_interval = max_interval 41 self.socket_timeout = socket_timeout 42 self.colored = app.log.colored(self.logfile) 43 self.redirect_stdouts = (redirect_stdouts or 44 app.conf.CELERY_REDIRECT_STDOUTS) 45 self.redirect_stdouts_level = (redirect_stdouts_level or 46 app.conf.CELERY_REDIRECT_STDOUTS_LEVEL) 47 self.pidfile = pidfile 48 49 if not isinstance(self.loglevel, int): 50 self.loglevel = LOG_LEVELS[self.loglevel.upper()] 51 52 def run(self): 53 logger = self.setup_logging() 54 print(str(self.colored.cyan( 55 "celerybeat v%s is starting." 
% __version__))) 56 self.init_loader() 57 self.set_process_title() 58 self.start_scheduler(logger) 59 60 def setup_logging(self): 61 handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel, 62 logfile=self.logfile) 63 logger = self.app.log.get_default_logger(name="celery.beat") 64 if self.redirect_stdouts and not handled: 65 self.app.log.redirect_stdouts_to_logger(logger, 66 loglevel=self.redirect_stdouts_level) 67 return logger 68 69 def start_scheduler(self, logger=None): 70 c = self.colored 71 if self.pidfile: 72 pidlock = platforms.create_pidlock(self.pidfile).acquire() 73 atexit.register(pidlock.release) 74 beat = self.Service(app=self.app, 75 logger=logger, 76 max_interval=self.max_interval, 77 scheduler_cls=self.scheduler_cls, 78 schedule_filename=self.schedule) 79 80 print(str(c.blue("__ ", c.magenta("-"), 81 c.blue(" ... __ "), c.magenta("-"), 82 c.blue(" _\n"), 83 c.reset(self.startup_info(beat))))) 84 if self.socket_timeout: 85 logger.debug("Setting default socket timeout to %r", 86 self.socket_timeout) 87 socket.setdefaulttimeout(self.socket_timeout) 88 try: 89 self.install_sync_handler(beat) 90 beat.start() 91 except Exception, exc: 92 logger.critical("celerybeat raised exception %s: %r\n%s", 93 exc.__class__, exc, traceback.format_exc(), 94 exc_info=sys.exc_info()) 95 96 def init_loader(self): 97 # Run the worker init handler. 98 # (Usually imports task modules and such.) 
        self.app.loader.init_worker()

    def startup_info(self, beat):
        # Render the STARTUP_INFO_FMT banner with broker, logging and
        # scheduler details for display at startup.
        scheduler = beat.get_scheduler(lazy=True)
        return STARTUP_INFO_FMT % {
            "conninfo": self.app.broker_connection().as_uri(),
            "logfile": self.logfile or "[stderr]",
            "loglevel": LOG_LEVELS[self.loglevel],
            "loader": get_full_cls_name(self.app.loader.__class__),
            "scheduler": get_full_cls_name(scheduler.__class__),
            "scheduler_info": scheduler.info,
            "hmax_interval": humanize_seconds(beat.max_interval),
            "max_interval": beat.max_interval,
        }

    def set_process_title(self):
        # Skip an extra argv entry when started via a "manage" script
        # (presumably Django's manage.py -- TODO confirm).
        arg_start = "manage" in sys.argv[0] and 2 or 1
        platforms.set_process_title("celerybeat",
                                    info=" ".join(sys.argv[arg_start:]))

    def install_sync_handler(self, beat):
        """Install a `SIGTERM` + `SIGINT` handler that saves
        the celerybeat schedule."""

        def _sync(signum, frame):
            # Persist the schedule to disk, then exit cleanly.
            beat.sync()
            raise SystemExit()

        platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)
[end of celery/apps/beat.py]
[start of celery/apps/worker.py]
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import atexit
import logging
# multiprocessing is optional: fall back to None where unavailable.
try:
    import multiprocessing
except ImportError:
    multiprocessing = None  # noqa
import os
import socket
import sys
import warnings

from .. import __version__, platforms, signals
from ..app import app_or_default
from ..exceptions import ImproperlyConfigured, SystemTerminate
from ..utils import get_full_cls_name, isatty, LOG_LEVELS, cry
from ..worker import WorkController

# Under greenlet-based pools a killed greenlet raises GreenletExit;
# Worker.run() treats these as a normal shutdown.
try:
    from greenlet import GreenletExit
    IGNORE_ERRORS = (GreenletExit, )
except ImportError:
    IGNORE_ERRORS = ()


# Startup banner. NOTE(review): the exact ASCII-art spacing was lost in
# this flattened listing and is reconstructed here -- verify upstream.
BANNER = """
-------------- celery@%(hostname)s v%(version)s
---- **** -----
--- * *** * -- [Configuration]
-- * - **** --- . broker: %(conninfo)s
- ** ---------- . loader: %(loader)s
- ** ---------- . logfile: %(logfile)s@%(loglevel)s
- ** ---------- . 
concurrency: %(concurrency)s 36 - ** ---------- . events: %(events)s 37 - *** --- * --- . beat: %(celerybeat)s 38 -- ******* ---- 39 --- ***** ----- [Queues] 40 -------------- %(queues)s 41 """ 42 43 EXTRA_INFO_FMT = """ 44 [Tasks] 45 %(tasks)s 46 """ 47 48 49 def cpu_count(): 50 if multiprocessing is not None: 51 try: 52 return multiprocessing.cpu_count() 53 except NotImplementedError: 54 pass 55 return 2 56 57 58 def get_process_name(): 59 if multiprocessing is not None: 60 return multiprocessing.current_process().name 61 62 63 class Worker(object): 64 WorkController = WorkController 65 66 def __init__(self, concurrency=None, loglevel=None, logfile=None, 67 hostname=None, discard=False, run_clockservice=False, 68 schedule=None, task_time_limit=None, task_soft_time_limit=None, 69 max_tasks_per_child=None, queues=None, events=None, db=None, 70 include=None, app=None, pidfile=None, 71 redirect_stdouts=None, redirect_stdouts_level=None, 72 autoscale=None, scheduler_cls=None, pool=None, **kwargs): 73 self.app = app = app_or_default(app) 74 conf = app.conf 75 self.concurrency = (concurrency or 76 conf.CELERYD_CONCURRENCY or cpu_count()) 77 self.loglevel = loglevel or conf.CELERYD_LOG_LEVEL 78 self.logfile = logfile or conf.CELERYD_LOG_FILE 79 80 self.hostname = hostname or socket.gethostname() 81 self.discard = discard 82 self.run_clockservice = run_clockservice 83 if self.app.IS_WINDOWS and self.run_clockservice: 84 self.die("-B option does not work on Windows. 
" 85 "Please run celerybeat as a separate service.") 86 self.schedule = schedule or conf.CELERYBEAT_SCHEDULE_FILENAME 87 self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER 88 self.events = events if events is not None else conf.CELERY_SEND_EVENTS 89 self.task_time_limit = (task_time_limit or 90 conf.CELERYD_TASK_TIME_LIMIT) 91 self.task_soft_time_limit = (task_soft_time_limit or 92 conf.CELERYD_TASK_SOFT_TIME_LIMIT) 93 self.max_tasks_per_child = (max_tasks_per_child or 94 conf.CELERYD_MAX_TASKS_PER_CHILD) 95 self.redirect_stdouts = (redirect_stdouts or 96 conf.CELERY_REDIRECT_STDOUTS) 97 self.redirect_stdouts_level = (redirect_stdouts_level or 98 conf.CELERY_REDIRECT_STDOUTS_LEVEL) 99 self.pool = pool or conf.CELERYD_POOL 100 self.db = db 101 self.use_queues = [] if queues is None else queues 102 self.queues = None 103 self.include = [] if include is None else include 104 self.pidfile = pidfile 105 self.autoscale = None 106 if autoscale: 107 max_c, _, min_c = autoscale.partition(",") 108 self.autoscale = [int(max_c), min_c and int(min_c) or 0] 109 self._isatty = isatty(sys.stdout) 110 111 self.colored = app.log.colored(self.logfile) 112 113 if isinstance(self.use_queues, basestring): 114 self.use_queues = self.use_queues.split(",") 115 if isinstance(self.include, basestring): 116 self.include = self.include.split(",") 117 118 if not isinstance(self.loglevel, int): 119 try: 120 self.loglevel = LOG_LEVELS[self.loglevel.upper()] 121 except KeyError: 122 self.die("Unknown level %r. Please use one of %s." 
% ( 123 self.loglevel, 124 "|".join(l for l in LOG_LEVELS.keys() 125 if isinstance(l, basestring)))) 126 127 def run(self): 128 self.init_loader() 129 self.init_queues() 130 self.worker_init() 131 self.redirect_stdouts_to_logger() 132 133 if getattr(os, "getuid", None) and os.getuid() == 0: 134 warnings.warn( 135 "Running celeryd with superuser privileges is discouraged!") 136 137 if self.discard: 138 self.purge_messages() 139 140 # Dump configuration to screen so we have some basic information 141 # for when users sends bug reports. 142 print(str(self.colored.cyan(" \n", self.startup_info())) + 143 str(self.colored.reset(self.extra_info()))) 144 self.set_process_status("-active-") 145 146 try: 147 self.run_worker() 148 except IGNORE_ERRORS: 149 pass 150 151 def on_consumer_ready(self, consumer): 152 signals.worker_ready.send(sender=consumer) 153 print("celery@%s has started." % self.hostname) 154 155 def init_queues(self): 156 if self.use_queues: 157 create_missing = self.app.conf.CELERY_CREATE_MISSING_QUEUES 158 try: 159 self.app.amqp.queues.select_subset(self.use_queues, 160 create_missing) 161 except KeyError, exc: 162 raise ImproperlyConfigured( 163 "Trying to select queue subset of %r, but queue %s" 164 "is not defined in CELERY_QUEUES. 
If you want to " 165 "automatically declare unknown queues you have to " 166 "enable CELERY_CREATE_MISSING_QUEUES" % ( 167 self.use_queues, exc)) 168 169 def init_loader(self): 170 self.loader = self.app.loader 171 self.settings = self.app.conf 172 for module in self.include: 173 self.loader.import_from_cwd(module) 174 175 def redirect_stdouts_to_logger(self): 176 handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel, 177 logfile=self.logfile) 178 if not handled: 179 logger = self.app.log.get_default_logger() 180 if self.redirect_stdouts: 181 self.app.log.redirect_stdouts_to_logger(logger, 182 loglevel=self.redirect_stdouts_level) 183 184 def purge_messages(self): 185 count = self.app.control.discard_all() 186 what = (not count or count > 1) and "messages" or "message" 187 print("discard: Erased %d %s from the queue.\n" % (count, what)) 188 189 def worker_init(self): 190 # Run the worker init handler. 191 # (Usually imports task modules and such.) 192 self.loader.init_worker() 193 194 def tasklist(self, include_builtins=True): 195 from ..registry import tasks 196 tasklist = tasks.keys() 197 if not include_builtins: 198 tasklist = filter(lambda s: not s.startswith("celery."), 199 tasklist) 200 return "\n".join(" . 
%s" % task for task in sorted(tasklist)) 201 202 def extra_info(self): 203 if self.loglevel <= logging.INFO: 204 include_builtins = self.loglevel <= logging.DEBUG 205 tasklist = self.tasklist(include_builtins=include_builtins) 206 return EXTRA_INFO_FMT % {"tasks": tasklist} 207 return "" 208 209 def startup_info(self): 210 app = self.app 211 concurrency = self.concurrency 212 if self.autoscale: 213 cmax, cmin = self.autoscale 214 concurrency = "{min=%s, max=%s}" % (cmin, cmax) 215 return BANNER % { 216 "hostname": self.hostname, 217 "version": __version__, 218 "conninfo": self.app.broker_connection().as_uri(), 219 "concurrency": concurrency, 220 "loglevel": LOG_LEVELS[self.loglevel], 221 "logfile": self.logfile or "[stderr]", 222 "celerybeat": "ON" if self.run_clockservice else "OFF", 223 "events": "ON" if self.events else "OFF", 224 "loader": get_full_cls_name(self.loader.__class__), 225 "queues": app.amqp.queues.format(indent=18, indent_first=False), 226 } 227 228 def run_worker(self): 229 if self.pidfile: 230 pidlock = platforms.create_pidlock(self.pidfile).acquire() 231 atexit.register(pidlock.release) 232 worker = self.WorkController(app=self.app, 233 concurrency=self.concurrency, 234 loglevel=self.loglevel, 235 logfile=self.logfile, 236 hostname=self.hostname, 237 ready_callback=self.on_consumer_ready, 238 embed_clockservice=self.run_clockservice, 239 schedule_filename=self.schedule, 240 scheduler_cls=self.scheduler_cls, 241 send_events=self.events, 242 db=self.db, 243 max_tasks_per_child=self.max_tasks_per_child, 244 task_time_limit=self.task_time_limit, 245 task_soft_time_limit=self.task_soft_time_limit, 246 autoscale=self.autoscale, 247 pool_cls=self.pool) 248 self.install_platform_tweaks(worker) 249 worker.start() 250 251 def install_platform_tweaks(self, worker): 252 """Install platform specific tweaks and workarounds.""" 253 if self.app.IS_OSX: 254 self.osx_proxy_detection_workaround() 255 256 # Install signal handler so SIGHUP restarts the worker. 
        # NOTE(review): original indentation was lost in this listing;
        # the statement grouping below is reconstructed -- verify upstream.
        if not self._isatty:
            # only install HUP handler if detached from terminal,
            # so closing the terminal window doesn't restart celeryd
            # into the background.
            if self.app.IS_OSX:
                # OS X can't exec from a process using threads.
                # See http://github.com/ask/celery/issues#issue/152
                install_HUP_not_supported_handler(worker)
            else:
                install_worker_restart_handler(worker)
        install_worker_term_handler(worker)
        install_worker_int_handler(worker)
        install_cry_handler(worker.logger)
        install_rdb_handler()
        signals.worker_init.send(sender=worker)

    def osx_proxy_detection_workaround(self):
        """See http://github.com/ask/celery/issues#issue/161"""
        os.environ.setdefault("celery_dummy_proxy", "set_by_celeryd")

    def set_process_status(self, info):
        # Show the current argv in the process title next to the status.
        info = "%s (%s)" % (info, platforms.strargv(sys.argv))
        return platforms.set_mp_process_title("celeryd",
                                              info=info,
                                              hostname=self.hostname)

    def die(self, msg, exitcode=1):
        # Fatal CLI error: print to stderr and terminate the process.
        sys.stderr.write("Error: %s\n" % (msg, ))
        sys.exit(exitcode)


def install_worker_int_handler(worker):
    # First Ctrl+C: warm shutdown, and re-arm SIGINT so a second
    # Ctrl+C triggers the cold-shutdown handler below.

    def _stop(signum, frame):
        process_name = get_process_name()
        if not process_name or process_name == "MainProcess":
            print("celeryd: Hitting Ctrl+C again will terminate "
                  "all running tasks!")
            install_worker_int_again_handler(worker)
            print("celeryd: Warm shutdown (%s)" % (process_name, ))
            worker.stop(in_sighandler=True)
        raise SystemExit()

    platforms.signals["SIGINT"] = _stop


def install_worker_int_again_handler(worker):
    # Second Ctrl+C: cold shutdown -- terminate running tasks.

    def _stop(signum, frame):
        process_name = get_process_name()
        if not process_name or process_name == "MainProcess":
            print("celeryd: Cold shutdown (%s)" % (process_name, ))
            worker.terminate(in_sighandler=True)
        raise SystemTerminate()

    platforms.signals["SIGINT"] = _stop


def install_worker_term_handler(worker):

    def _stop(signum, frame):
process_name = get_process_name() 319 if not process_name or process_name == "MainProcess": 320 print("celeryd: Warm shutdown (%s)" % (process_name, )) 321 worker.stop(in_sighandler=True) 322 raise SystemExit() 323 324 platforms.signals["SIGTERM"] = _stop 325 326 327 def install_worker_restart_handler(worker): 328 329 def restart_worker_sig_handler(signum, frame): 330 """Signal handler restarting the current python program.""" 331 print("Restarting celeryd (%s)" % (" ".join(sys.argv), )) 332 worker.stop(in_sighandler=True) 333 os.execv(sys.executable, [sys.executable] + sys.argv) 334 335 platforms.signals["SIGHUP"] = restart_worker_sig_handler 336 337 338 def install_cry_handler(logger): 339 # Jython/PyPy does not have sys._current_frames 340 is_jython = sys.platform.startswith("java") 341 is_pypy = hasattr(sys, "pypy_version_info") 342 if not (is_jython or is_pypy): 343 344 def cry_handler(signum, frame): 345 """Signal handler logging the stacktrace of all active threads.""" 346 logger.error("\n" + cry()) 347 348 platforms.signals["SIGUSR1"] = cry_handler 349 350 351 def install_rdb_handler(envvar="CELERY_RDBSIG"): # pragma: no cover 352 353 def rdb_handler(signum, frame): 354 """Signal handler setting a rdb breakpoint at the current frame.""" 355 from ..contrib import rdb 356 rdb.set_trace(frame) 357 358 if os.environ.get(envvar): 359 platforms.signals["SIGUSR2"] = rdb_handler 360 361 362 def install_HUP_not_supported_handler(worker): 363 364 def warn_on_HUP_handler(signum, frame): 365 worker.logger.error("SIGHUP not supported: " 366 "Restarting with HUP is unstable on this platform!") 367 368 platforms.signals["SIGHUP"] = warn_on_HUP_handler 369 [end of celery/apps/worker.py] [start of celery/backends/__init__.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 from .. 
import current_app 5 from ..local import Proxy 6 from ..utils import get_cls_by_name 7 from ..utils.functional import memoize 8 9 BACKEND_ALIASES = { 10 "amqp": "celery.backends.amqp.AMQPBackend", 11 "cache": "celery.backends.cache.CacheBackend", 12 "redis": "celery.backends.redis.RedisBackend", 13 "mongodb": "celery.backends.mongodb.MongoBackend", 14 "tyrant": "celery.backends.tyrant.TyrantBackend", 15 "database": "celery.backends.database.DatabaseBackend", 16 "cassandra": "celery.backends.cassandra.CassandraBackend", 17 "disabled": "celery.backends.base.DisabledBackend", 18 } 19 20 21 @memoize(100) 22 def get_backend_cls(backend=None, loader=None): 23 """Get backend class by name/alias""" 24 backend = backend or "disabled" 25 loader = loader or current_app.loader 26 aliases = dict(BACKEND_ALIASES, **loader.override_backends) 27 try: 28 return get_cls_by_name(backend, aliases) 29 except ValueError, exc: 30 raise ValueError("Unknown result backend: %r. " 31 "Did you spell it correctly? (%s)" % (backend, exc)) 32 33 34 # deprecate this. 35 default_backend = Proxy(lambda: current_app.backend) 36 [end of celery/backends/__init__.py] [start of celery/backends/amqp.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 from __future__ import with_statement 4 5 import socket 6 import threading 7 import time 8 9 from itertools import count 10 11 from kombu.entity import Exchange, Queue 12 from kombu.messaging import Consumer, Producer 13 14 from .. import states 15 from ..exceptions import TimeoutError 16 17 from .base import BaseDictBackend 18 19 20 class BacklogLimitExceeded(Exception): 21 """Too much state history to fast-forward.""" 22 23 24 def repair_uuid(s): 25 # Historically the dashes in UUIDS are removed from AMQ entity names, 26 # but there is no known reason to. Hopefully we'll be able to fix 27 # this in v3.0. 
    return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])


class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages."""
    # kombu entity/messaging classes; class attributes so subclasses or
    # tests can substitute them.
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    def __init__(self, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True,
                 **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        # persistent=None means "use the CELERY_RESULT_PERSISTENT setting".
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        # NOTE(review): this reads the `persistent` *argument*, not
        # self.persistent -- when persistent is None but the setting is
        # true, delivery_mode is "transient"; confirm this is intended.
        delivery_mode = persistent and "persistent" or "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=auto_delete)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
        # removed in version 3.0.
62 dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES 63 64 self.expires = None 65 if "expires" in kwargs: 66 if kwargs["expires"] is not None: 67 self.expires = self.prepare_expires(kwargs["expires"]) 68 else: 69 self.expires = self.prepare_expires(dexpires) 70 71 if self.expires: 72 self.queue_arguments["x-expires"] = int(self.expires * 1000) 73 self.mutex = threading.Lock() 74 75 def _create_binding(self, task_id): 76 name = task_id.replace("-", "") 77 return self.Queue(name=name, 78 exchange=self.exchange, 79 routing_key=name, 80 durable=self.persistent, 81 auto_delete=self.auto_delete, 82 queue_arguments=self.queue_arguments) 83 84 def _create_producer(self, task_id, channel): 85 self._create_binding(task_id)(channel).declare() 86 return self.Producer(channel, exchange=self.exchange, 87 routing_key=task_id.replace("-", ""), 88 serializer=self.serializer) 89 90 def _create_consumer(self, bindings, channel): 91 return self.Consumer(channel, bindings, no_ack=True) 92 93 def _publish_result(self, connection, task_id, meta): 94 # cache single channel 95 if connection._default_channel is not None and \ 96 connection._default_channel.connection is None: 97 connection.maybe_close_channel(connection._default_channel) 98 channel = connection.default_channel 99 100 self._create_producer(task_id, channel).publish(meta) 101 102 def revive(self, channel): 103 pass 104 105 def _store_result(self, task_id, result, status, traceback=None, 106 max_retries=20, interval_start=0, interval_step=1, 107 interval_max=1): 108 """Send task return value and status.""" 109 with self.mutex: 110 with self.app.pool.acquire(block=True) as conn: 111 112 def errback(error, delay): 113 print("Couldn't send result for %r: %r. Retry in %rs." 
% ( 114 task_id, error, delay)) 115 116 send = conn.ensure(self, self._publish_result, 117 max_retries=max_retries, 118 errback=errback, 119 interval_start=interval_start, 120 interval_step=interval_step, 121 interval_max=interval_max) 122 send(conn, task_id, {"task_id": task_id, "status": status, 123 "result": self.encode_result(result, status), 124 "traceback": traceback}) 125 return result 126 127 def get_task_meta(self, task_id, cache=True): 128 return self.poll(task_id) 129 130 def wait_for(self, task_id, timeout=None, cache=True, propagate=True, 131 **kwargs): 132 cached_meta = self._cache.get(task_id) 133 if cache and cached_meta and \ 134 cached_meta["status"] in states.READY_STATES: 135 meta = cached_meta 136 else: 137 try: 138 meta = self.consume(task_id, timeout=timeout) 139 except socket.timeout: 140 raise TimeoutError("The operation timed out.") 141 142 state = meta["status"] 143 if state == states.SUCCESS: 144 return meta["result"] 145 elif state in states.PROPAGATE_STATES: 146 if propagate: 147 raise self.exception_to_python(meta["result"]) 148 return meta["result"] 149 else: 150 return self.wait_for(task_id, timeout, cache) 151 152 def poll(self, task_id, backlog_limit=100): 153 with self.app.pool.acquire_channel(block=True) as (_, channel): 154 binding = self._create_binding(task_id)(channel) 155 binding.declare() 156 latest, acc = None, None 157 for i in count(): # fast-forward 158 latest, acc = acc, binding.get(no_ack=True) 159 if not acc: 160 break 161 if i > backlog_limit: 162 raise self.BacklogLimitExceeded(task_id) 163 if latest: 164 payload = self._cache[task_id] = latest.payload 165 return payload 166 elif task_id in self._cache: # use previously received state. 
167 return self._cache[task_id] 168 return {"status": states.PENDING, "result": None} 169 170 def drain_events(self, connection, consumer, timeout=None, now=time.time): 171 wait = connection.drain_events 172 results = {} 173 174 def callback(meta, message): 175 if meta["status"] in states.READY_STATES: 176 uuid = repair_uuid(message.delivery_info["routing_key"]) 177 results[uuid] = meta 178 179 consumer.callbacks[:] = [callback] 180 time_start = now() 181 182 while 1: 183 # Total time spent may exceed a single call to wait() 184 if timeout and now() - time_start >= timeout: 185 raise socket.timeout() 186 wait(timeout=timeout) 187 if results: # got event on the wanted channel. 188 break 189 self._cache.update(results) 190 return results 191 192 def consume(self, task_id, timeout=None): 193 with self.app.pool.acquire_channel(block=True) as (conn, channel): 194 binding = self._create_binding(task_id) 195 with self._create_consumer(binding, channel) as consumer: 196 return self.drain_events(conn, consumer, timeout).values()[0] 197 198 def get_many(self, task_ids, timeout=None, **kwargs): 199 with self.app.pool.acquire_channel(block=True) as (conn, channel): 200 ids = set(task_ids) 201 cached_ids = set() 202 for task_id in ids: 203 try: 204 cached = self._cache[task_id] 205 except KeyError: 206 pass 207 else: 208 if cached["status"] in states.READY_STATES: 209 yield task_id, cached 210 cached_ids.add(task_id) 211 212 ids ^= cached_ids 213 bindings = [self._create_binding(task_id) for task_id in task_ids] 214 with self._create_consumer(bindings, channel) as consumer: 215 while ids: 216 r = self.drain_events(conn, consumer, timeout) 217 ids ^= set(r.keys()) 218 for ready_id, ready_meta in r.iteritems(): 219 yield ready_id, ready_meta 220 221 def reload_task_result(self, task_id): 222 raise NotImplementedError( 223 "reload_task_result is not supported by this backend.") 224 225 def reload_taskset_result(self, task_id): 226 """Reload taskset result, even if it has been 
previously fetched.""" 227 raise NotImplementedError( 228 "reload_taskset_result is not supported by this backend.") 229 230 def save_taskset(self, taskset_id, result): 231 raise NotImplementedError( 232 "save_taskset is not supported by this backend.") 233 234 def restore_taskset(self, taskset_id, cache=True): 235 raise NotImplementedError( 236 "restore_taskset is not supported by this backend.") 237 238 def delete_taskset(self, taskset_id): 239 raise NotImplementedError( 240 "delete_taskset is not supported by this backend.") 241 242 def __reduce__(self, args=(), kwargs={}): 243 kwargs.update( 244 dict(connection=self._connection, 245 exchange=self.exchange.name, 246 exchange_type=self.exchange.type, 247 persistent=self.persistent, 248 serializer=self.serializer, 249 auto_delete=self.auto_delete, 250 expires=self.expires)) 251 return super(AMQPBackend, self).__reduce__(args, kwargs) 252 [end of celery/backends/amqp.py] [start of celery/backends/base.py] 1 # -*- coding: utf-8 -*- 2 """celery.backends.base""" 3 from __future__ import absolute_import 4 5 import time 6 import sys 7 8 from datetime import timedelta 9 10 from kombu import serialization 11 12 from .. 
def unpickle_backend(cls, args, kwargs):
    """Reconstruct a backend instance from its pickled state.

    This is the module-level factory referenced by
    ``BaseBackend.__reduce__`` so that unpickling recreates the backend
    with its original constructor arguments.
    """
    backend = cls(*args, **kwargs)
    return backend
    def mark_as_retry(self, task_id, exc, traceback=None):
        """Mark task as being retried.

        Stores the current exception ``exc`` (if any) and the optional
        ``traceback`` under the RETRY state.
        """
        return self.store_result(task_id, exc, status=states.RETRY,
                                 traceback=traceback)
    def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
        """Wait for task and return its result.

        If the task raises an exception, this exception
        will be re-raised by :func:`wait_for`.

        If `timeout` is not :const:`None`, this raises the
        :class:`celery.exceptions.TimeoutError` exception if the operation
        takes longer than `timeout` seconds.

        :param task_id: id of the task to wait for.
        :param propagate: re-raise the task's exception on failure
            (otherwise the exception instance is returned).
        :param interval: seconds slept between status polls.

        """

        # NOTE(review): elapsed time is approximated by counting sleep
        # intervals, so time spent inside get_status()/get_result() is
        # not included and the real wait can exceed `timeout` slightly.
        time_elapsed = 0.0

        while True:
            status = self.get_status(task_id)
            if status == states.SUCCESS:
                return self.get_result(task_id)
            elif status in states.PROPAGATE_STATES:
                # Failure/retry/revoked: either raise the stored exception
                # or hand it back to the caller.
                result = self.get_result(task_id)
                if propagate:
                    raise result
                return result
            # avoid hammering the CPU checking status.
            time.sleep(interval)
            time_elapsed += interval
            if timeout and time_elapsed >= timeout:
                raise TimeoutError("The operation timed out.")
    def store_result(self, task_id, result, status, traceback=None, **kwargs):
        """Store task result and status.

        Encodes the result first (exceptions are made serializable by
        :meth:`encode_result`) and delegates the actual write to the
        concrete backend's ``_store_result``.
        """
        result = self.encode_result(result, status)
        return self._store_result(task_id, result, status, traceback, **kwargs)
    def get_task_meta(self, task_id, cache=True):
        """Return the result meta dict for *task_id*.

        :param cache: when true, a locally cached meta is returned if
            present, and a freshly fetched meta is cached afterwards.
        """
        if cache:
            try:
                return self._cache[task_id]
            except KeyError:
                pass

        meta = self._get_task_meta_for(task_id)
        # only SUCCESS metas are cached — presumably because other states
        # may still change; TODO(review) confirm against other backends.
        if cache and meta.get("status") == states.SUCCESS:
            self._cache[task_id] = meta
        return meta
get(self, key): 302 raise NotImplementedError("Must implement the get method.") 303 304 def mget(self, keys): 305 raise NotImplementedError("Does not support get_many") 306 307 def set(self, key, value): 308 raise NotImplementedError("Must implement the set method.") 309 310 def delete(self, key): 311 raise NotImplementedError("Must implement the delete method") 312 313 def get_key_for_task(self, task_id): 314 """Get the cache key for a task by id.""" 315 return self.task_keyprefix + task_id 316 317 def get_key_for_taskset(self, taskset_id): 318 """Get the cache key for a task by id.""" 319 return self.taskset_keyprefix + taskset_id 320 321 def _strip_prefix(self, key): 322 for prefix in self.task_keyprefix, self.taskset_keyprefix: 323 if key.startswith(prefix): 324 return key[len(prefix):] 325 return key 326 327 def _mget_to_results(self, values, keys): 328 if hasattr(values, "items"): 329 # client returns dict so mapping preserved. 330 return dict((self._strip_prefix(k), self.decode(v)) 331 for k, v in values.iteritems() 332 if v is not None) 333 else: 334 # client returns list so need to recreate mapping. 
335 return dict((keys[i], self.decode(value)) 336 for i, value in enumerate(values) 337 if value is not None) 338 339 def get_many(self, task_ids, timeout=None, interval=0.5): 340 ids = set(task_ids) 341 cached_ids = set() 342 for task_id in ids: 343 try: 344 cached = self._cache[task_id] 345 except KeyError: 346 pass 347 else: 348 if cached["status"] in states.READY_STATES: 349 yield task_id, cached 350 cached_ids.add(task_id) 351 352 ids ^= cached_ids 353 iterations = 0 354 while ids: 355 keys = list(ids) 356 r = self._mget_to_results(self.mget([self.get_key_for_task(k) 357 for k in keys]), keys) 358 self._cache.update(r) 359 ids ^= set(r.keys()) 360 for key, value in r.iteritems(): 361 yield key, value 362 if timeout and iterations * interval >= timeout: 363 raise TimeoutError("Operation timed out (%s)" % (timeout, )) 364 time.sleep(interval) # don't busy loop. 365 iterations += 0 366 367 def _forget(self, task_id): 368 self.delete(self.get_key_for_task(task_id)) 369 370 def _store_result(self, task_id, result, status, traceback=None): 371 meta = {"status": status, "result": result, "traceback": traceback} 372 self.set(self.get_key_for_task(task_id), self.encode(meta)) 373 return result 374 375 def _save_taskset(self, taskset_id, result): 376 self.set(self.get_key_for_taskset(taskset_id), 377 self.encode({"result": result})) 378 return result 379 380 def _delete_taskset(self, taskset_id): 381 self.delete(self.get_key_for_taskset(taskset_id)) 382 383 def _get_task_meta_for(self, task_id): 384 """Get task metadata for a task by id.""" 385 meta = self.get(self.get_key_for_task(task_id)) 386 if not meta: 387 return {"status": states.PENDING, "result": None} 388 return self.decode(meta) 389 390 def _restore_taskset(self, taskset_id): 391 """Get task metadata for a task by id.""" 392 meta = self.get(self.get_key_for_taskset(taskset_id)) 393 if meta: 394 return self.decode(meta) 395 396 397 class DisabledBackend(BaseBackend): 398 _cache = {} # need this attribute to 
class DummyClient(object):
    """In-memory stand-in for a memcached client (the "memory" backend).

    Mirrors the subset of the memcache client API used by the cache
    backend, storing values in a bounded LRU mapping instead of talking
    to a server.
    """

    def __init__(self, *args, **kwargs):
        # Extra args/kwargs are accepted (and ignored) so this can be
        # constructed exactly like a real memcache client.
        self.cache = LRUCache(limit=5000)

    def get(self, key, *args, **kwargs):
        """Return the value stored for *key*, or None when missing."""
        return self.cache.get(key)

    def get_multi(self, keys):
        """Return a dict of key -> value for those *keys* present."""
        store = self.cache
        return dict((wanted, store[wanted])
                    for wanted in keys if wanted in store)

    def set(self, key, value, *args, **kwargs):
        """Store *value* under *key* (expiry arguments are ignored)."""
        self.cache[key] = value

    def delete(self, key, *args, **kwargs):
        """Remove *key* if present; missing keys are ignored."""
        self.cache.pop(key, None)
class CacheBackend(KeyValueStoreBackend):
    """Cache result backend (memcached/pylibmc or in-memory)."""

    #: list of "host:port" servers parsed from the backend URL.
    servers = None

    def __init__(self, expires=None, backend=None, options={}, **kwargs):
        """Configure the cache backend from arguments and app config.

        :raises ImproperlyConfigured: if the configured backend name is
            not one of the supported cache client types.
        """
        # BUGFIX: previously ``self`` was also passed as a positional
        # argument (``__init__(self, **kwargs)``), so the backend instance
        # itself ended up in ``*args`` of the base class.
        super(CacheBackend, self).__init__(**kwargs)

        self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                            **options)

        self.backend = backend or self.app.conf.CELERY_CACHE_BACKEND
        if self.backend:
            # e.g. "memcached://127.0.0.1:11211;127.0.0.2:11211/"
            self.backend, _, servers = self.backend.partition("://")
            self.servers = servers.rstrip('/').split(";")
        self.expires = self.prepare_expires(expires, type=int)
        try:
            self.Client = backends[self.backend]()
        except KeyError:
            raise ImproperlyConfigured(
                    "Unknown cache backend: %s. Please use one of the "
                    "following backends: %s" % (self.backend,
                                                ", ".join(backends.keys())))

    def get_key_for_task(self, task_id):
        # memcached keys must be bytes.
        return ensure_bytes(self.task_keyprefix) + ensure_bytes(task_id)

    def get_key_for_taskset(self, taskset_id):
        return ensure_bytes(self.taskset_keyprefix) + ensure_bytes(taskset_id)

    def get(self, key):
        return self.client.get(key)

    def mget(self, keys):
        return self.client.get_multi(keys)

    def set(self, key, value):
        return self.client.set(key, value, self.expires)

    def delete(self, key):
        return self.client.delete(key)

    @cached_property
    def client(self):
        # Created lazily so configuration errors surface on first use.
        return self.Client(self.servers, **self.options)

    def __reduce__(self, args=(), kwargs={}):
        """Support pickling by reconstructing with the same settings."""
        servers = ";".join(self.servers)
        backend = "%s://%s/" % (self.backend, servers)
        # Copy instead of updating in place so the shared default dict
        # is never mutated.
        kwargs = dict(kwargs,
                      backend=backend,
                      expires=self.expires,
                      options=self.options)
        return super(CacheBackend, self).__reduce__(args, kwargs)
def _sqlalchemy_installed():
    """Return the :mod:`sqlalchemy` module, raising if it is missing.

    :raises ImproperlyConfigured: with installation instructions when
        SQLAlchemy cannot be imported.
    """
    try:
        import sqlalchemy
    except ImportError:
        # NOTE(review): the two message literals concatenate without a
        # separating space ("...installed.See http...").
        raise ImproperlyConfigured(
            "The database result backend requires SQLAlchemy to be installed."
            "See http://pypi.python.org/pypi/SQLAlchemy")
    return sqlalchemy
# Fail fast at import time if the dependency is missing.
_sqlalchemy_installed()
    def _store_result(self, task_id, result, status, traceback=None):
        """Store return value and status of an executed task.

        Creates the :class:`Task` row on first write, then updates
        result/status/traceback in place.  The session is always closed,
        even when the query or commit raises.
        """
        session = self.ResultSession()
        try:
            task = session.query(Task).filter(Task.task_id == task_id).first()
            if not task:
                # First result for this task id: create the row so the
                # attribute updates below have something to attach to.
                task = Task(task_id)
                session.add(task)
                session.flush()
            task.result = result
            task.status = status
            task.traceback = traceback
            session.commit()
        finally:
            session.close()
        return result
session = self.ResultSession() 120 try: 121 session.query(Task).filter(Task.task_id == task_id).delete() 122 session.commit() 123 finally: 124 session.close() 125 126 def cleanup(self): 127 """Delete expired metadata.""" 128 session = self.ResultSession() 129 expires = self.expires 130 try: 131 session.query(Task).filter( 132 Task.date_done < (datetime.now() - expires)).delete() 133 session.query(TaskSet).filter( 134 TaskSet.date_done < (datetime.now() - expires)).delete() 135 session.commit() 136 finally: 137 session.close() 138 139 def __reduce__(self, args=(), kwargs={}): 140 kwargs.update( 141 dict(dburi=self.dburi, 142 expires=self.expires, 143 engine_options=self.engine_options)) 144 return super(DatabaseBackend, self).__reduce__(args, kwargs) 145 [end of celery/backends/database.py] [start of celery/backends/mongodb.py] 1 # -*- coding: utf-8 -*- 2 """MongoDB backend for celery.""" 3 from __future__ import absolute_import 4 5 from datetime import datetime 6 7 try: 8 import pymongo 9 except ImportError: 10 pymongo = None # noqa 11 12 from .. import states 13 from ..exceptions import ImproperlyConfigured 14 from ..utils.timeutils import maybe_timedelta 15 16 from .base import BaseDictBackend 17 18 19 class Bunch: 20 21 def __init__(self, **kw): 22 self.__dict__.update(kw) 23 24 25 class MongoBackend(BaseDictBackend): 26 mongodb_host = "localhost" 27 mongodb_port = 27017 28 mongodb_user = None 29 mongodb_password = None 30 mongodb_database = "celery" 31 mongodb_taskmeta_collection = "celery_taskmeta" 32 33 def __init__(self, *args, **kwargs): 34 """Initialize MongoDB backend instance. 35 36 :raises celery.exceptions.ImproperlyConfigured: if 37 module :mod:`pymongo` is not available. 
    def _get_database(self):
        """Get database from MongoDB connection and perform authentication
        if necessary.

        The database handle is created lazily and cached on the instance;
        authentication runs only when both user and password are set.

        :raises ImproperlyConfigured: if authentication is rejected.
        """
        if self._database is None:
            conn = self._get_connection()
            db = conn[self.mongodb_database]
            if self.mongodb_user and self.mongodb_password:
                auth = db.authenticate(self.mongodb_user,
                                       self.mongodb_password)
                if not auth:
                    raise ImproperlyConfigured(
                        "Invalid MongoDB username or password.")
            self._database = db

        return self._database
    def _get_task_meta_for(self, task_id):
        """Get task metadata for a task by id.

        Unknown task ids are reported as PENDING rather than an error,
        since the result may simply not have been written yet.
        """

        db = self._get_database()
        taskmeta_collection = db[self.mongodb_taskmeta_collection]
        obj = taskmeta_collection.find_one({"_id": task_id})
        if not obj:
            return {"status": states.PENDING, "result": None}

        # result/traceback were stored as Binary-wrapped encoded payloads
        # by _store_result, so decode them back into Python objects.
        meta = {
            "task_id": obj["_id"],
            "status": obj["status"],
            "result": self.decode(obj["result"]),
            "date_done": obj["date_done"],
            "traceback": self.decode(obj["traceback"]),
        }

        return meta
21 redis = redis 22 23 #: default Redis server hostname (`localhost`). 24 host = "localhost" 25 26 #: default Redis server port (6379) 27 port = 6379 28 29 #: default Redis db number (0) 30 db = 0 31 32 #: default Redis password (:const:`None`) 33 password = None 34 35 def __init__(self, host=None, port=None, db=None, password=None, 36 expires=None, **kwargs): 37 super(RedisBackend, self).__init__(**kwargs) 38 conf = self.app.conf 39 if self.redis is None: 40 raise ImproperlyConfigured( 41 "You need to install the redis library in order to use " 42 + "Redis result store backend.") 43 44 # For compatibility with the old REDIS_* configuration keys. 45 def _get(key): 46 for prefix in "CELERY_REDIS_%s", "REDIS_%s": 47 try: 48 return conf[prefix % key] 49 except KeyError: 50 pass 51 52 self.host = host or _get("HOST") or self.host 53 self.port = int(port or _get("PORT") or self.port) 54 self.db = db or _get("DB") or self.db 55 self.password = password or _get("PASSWORD") or self.password 56 self.expires = self.prepare_expires(expires, type=int) 57 58 def get(self, key): 59 return self.client.get(key) 60 61 def mget(self, keys): 62 return self.client.mget(keys) 63 64 def set(self, key, value): 65 client = self.client 66 client.set(key, value) 67 if self.expires is not None: 68 client.expire(key, self.expires) 69 70 def delete(self, key): 71 self.client.delete(key) 72 73 def on_chord_apply(self, setid, *args, **kwargs): 74 pass 75 76 def on_chord_part_return(self, task, propagate=False, 77 keyprefix="chord-unlock-%s"): 78 from ..task.sets import subtask 79 from ..result import TaskSetResult 80 setid = task.request.taskset 81 key = keyprefix % setid 82 deps = TaskSetResult.restore(setid, backend=task.backend) 83 if self.client.incr(key) >= deps.total: 84 subtask(task.request.chord).delay(deps.join(propagate=propagate)) 85 deps.delete() 86 self.client.expire(key, 86400) 87 88 @cached_property 89 def client(self): 90 return self.redis.Redis(host=self.host, port=self.port, 91 
db=self.db, password=self.password) 92 93 def __reduce__(self, args=(), kwargs={}): 94 kwargs.update( 95 dict(host=self.host, 96 port=self.port, 97 db=self.db, 98 password=self.password, 99 expires=self.expires)) 100 return super(RedisBackend, self).__reduce__(args, kwargs) 101 [end of celery/backends/redis.py] [start of celery/beat.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.beat 4 ~~~~~~~~~~~ 5 6 The Celery periodic task scheduler. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 import errno 15 import os 16 import time 17 import shelve 18 import sys 19 import threading 20 import traceback 21 try: 22 import multiprocessing 23 except ImportError: 24 multiprocessing = None # noqa 25 26 from datetime import datetime 27 28 from . import __version__ 29 from . import platforms 30 from . import registry 31 from . import signals 32 from .app import app_or_default 33 from .log import SilenceRepeated 34 from .schedules import maybe_schedule, crontab 35 from .utils import cached_property, instantiate, maybe_promise 36 from .utils.timeutils import humanize_seconds 37 38 39 class SchedulingError(Exception): 40 """An error occured while scheduling a task.""" 41 42 43 class ScheduleEntry(object): 44 """An entry in the scheduler. 45 46 :keyword name: see :attr:`name`. 47 :keyword schedule: see :attr:`schedule`. 48 :keyword args: see :attr:`args`. 49 :keyword kwargs: see :attr:`kwargs`. 50 :keyword options: see :attr:`options`. 51 :keyword last_run_at: see :attr:`last_run_at`. 52 :keyword total_run_count: see :attr:`total_run_count`. 53 :keyword relative: Is the time relative to when the server starts? 54 55 """ 56 57 #: The task name 58 name = None 59 60 #: The schedule (run_every/crontab) 61 schedule = None 62 63 #: Positional arguments to apply. 64 args = None 65 66 #: Keyword arguments to apply. 67 kwargs = None 68 69 #: Task execution options. 
70 options = None 71 72 #: The time and date of when this task was last scheduled. 73 last_run_at = None 74 75 #: Total number of times this task has been scheduled. 76 total_run_count = 0 77 78 def __init__(self, name=None, task=None, last_run_at=None, 79 total_run_count=None, schedule=None, args=(), kwargs={}, 80 options={}, relative=False): 81 self.name = name 82 self.task = task 83 self.args = args 84 self.kwargs = kwargs 85 self.options = options 86 self.schedule = maybe_schedule(schedule, relative) 87 self.last_run_at = last_run_at or self._default_now() 88 self.total_run_count = total_run_count or 0 89 90 def _default_now(self): 91 return datetime.now() 92 93 def _next_instance(self, last_run_at=None): 94 """Returns a new instance of the same class, but with 95 its date and count fields updated.""" 96 return self.__class__(**dict(self, 97 last_run_at=last_run_at or datetime.now(), 98 total_run_count=self.total_run_count + 1)) 99 __next__ = next = _next_instance # for 2to3 100 101 def update(self, other): 102 """Update values from another entry. 103 104 Does only update "editable" fields (task, schedule, args, kwargs, 105 options). 106 107 """ 108 self.__dict__.update({"task": other.task, "schedule": other.schedule, 109 "args": other.args, "kwargs": other.kwargs, 110 "options": other.options}) 111 112 def is_due(self): 113 """See :meth:`celery.task.base.PeriodicTask.is_due`.""" 114 return self.schedule.is_due(self.last_run_at) 115 116 def __iter__(self): 117 return vars(self).iteritems() 118 119 def __repr__(self): 120 return ("<Entry: %(name)s %(task)s(*%(args)s, **%(kwargs)s) " 121 "{%(schedule)s}>" % vars(self)) 122 123 124 class Scheduler(object): 125 """Scheduler for periodic tasks. 126 127 :keyword schedule: see :attr:`schedule`. 128 :keyword logger: see :attr:`logger`. 129 :keyword max_interval: see :attr:`max_interval`. 130 131 """ 132 133 Entry = ScheduleEntry 134 135 #: The schedule dict/shelve. 136 schedule = None 137 138 #: Current logger. 
    def maybe_due(self, entry, publisher=None):
        """Apply *entry* if it is due; return seconds until the next check.

        Send errors are logged (with a traceback) but not re-raised, so a
        single failing task does not stop the scheduler loop.
        """
        is_due, next_time_to_run = entry.is_due()

        if is_due:
            self.logger.debug("Scheduler: Sending due task %s", entry.task)
            try:
                result = self.apply_async(entry, publisher=publisher)
            except Exception, exc:
                self.logger.error("Message Error: %s\n%s", exc,
                                  traceback.format_stack(),
                                  exc_info=sys.exc_info())
            else:
                self.logger.debug("%s sent. id->%s", entry.task,
                                  result.task_id)
        return next_time_to_run
190 191 """ 192 remaining_times = [] 193 try: 194 for entry in self.schedule.itervalues(): 195 next_time_to_run = self.maybe_due(entry, self.publisher) 196 if next_time_to_run: 197 remaining_times.append(next_time_to_run) 198 except RuntimeError: 199 pass 200 201 return min(remaining_times + [self.max_interval]) 202 203 def should_sync(self): 204 return (not self._last_sync or 205 (time.time() - self._last_sync) > self.sync_every) 206 207 def reserve(self, entry): 208 new_entry = self.schedule[entry.name] = entry.next() 209 return new_entry 210 211 def apply_async(self, entry, publisher=None, **kwargs): 212 # Update timestamps and run counts before we actually execute, 213 # so we have that done if an exception is raised (doesn't schedule 214 # forever.) 215 entry = self.reserve(entry) 216 task = registry.tasks.get(entry.task) 217 218 try: 219 if task: 220 result = task.apply_async(entry.args, entry.kwargs, 221 publisher=publisher, 222 **entry.options) 223 else: 224 result = self.send_task(entry.task, entry.args, entry.kwargs, 225 publisher=publisher, 226 **entry.options) 227 except Exception, exc: 228 raise SchedulingError("Couldn't apply scheduled task %s: %s" % ( 229 entry.name, exc)) 230 231 if self.should_sync(): 232 self._do_sync() 233 return result 234 235 def send_task(self, *args, **kwargs): # pragma: no cover 236 return self.app.send_task(*args, **kwargs) 237 238 def setup_schedule(self): 239 self.install_default_entries(self.data) 240 241 def _do_sync(self): 242 try: 243 self.logger.debug("Celerybeat: Synchronizing schedule...") 244 self.sync() 245 finally: 246 self._last_sync = time.time() 247 248 def sync(self): 249 pass 250 251 def close(self): 252 self.sync() 253 254 def add(self, **kwargs): 255 entry = self.Entry(**kwargs) 256 self.schedule[entry.name] = entry 257 return entry 258 259 def _maybe_entry(self, name, entry): 260 if isinstance(entry, self.Entry): 261 return entry 262 return self.Entry(**dict(entry, name=name)) 263 264 def 
update_from_dict(self, dict_): 265 self.schedule.update(dict((name, self._maybe_entry(name, entry)) 266 for name, entry in dict_.items())) 267 268 def merge_inplace(self, b): 269 schedule = self.schedule 270 A, B = set(schedule.keys()), set(b.keys()) 271 272 # Remove items from disk not in the schedule anymore. 273 for key in A ^ B: 274 schedule.pop(key, None) 275 276 # Update and add new items in the schedule 277 for key in B: 278 entry = self.Entry(**dict(b[key], name=key)) 279 if schedule.get(key): 280 schedule[key].update(entry) 281 else: 282 schedule[key] = entry 283 284 def get_schedule(self): 285 return self.data 286 287 def set_schedule(self, schedule): 288 self.data = schedule 289 290 def _ensure_connected(self): 291 # callback called for each retry while the connection 292 # can't be established. 293 def _error_handler(exc, interval): 294 self.logger.error("Celerybeat: Connection error: %s. " 295 "Trying again in %s seconds...", exc, interval) 296 297 return self.connection.ensure_connection(_error_handler, 298 self.app.conf.BROKER_CONNECTION_MAX_RETRIES) 299 300 @cached_property 301 def connection(self): 302 return self.app.broker_connection() 303 304 @cached_property 305 def publisher(self): 306 return self.Publisher(connection=self._ensure_connected()) 307 308 @property 309 def schedule(self): 310 return self.get_schedule() 311 312 @property 313 def info(self): 314 return "" 315 316 317 class PersistentScheduler(Scheduler): 318 persistence = shelve 319 320 _store = None 321 322 def __init__(self, *args, **kwargs): 323 self.schedule_filename = kwargs.get("schedule_filename") 324 Scheduler.__init__(self, *args, **kwargs) 325 326 def _remove_db(self): 327 for suffix in "", ".db", ".dat", ".bak", ".dir": 328 try: 329 os.remove(self.schedule_filename + suffix) 330 except OSError, exc: 331 if exc.errno != errno.ENOENT: 332 raise 333 334 def setup_schedule(self): 335 try: 336 self._store = self.persistence.open(self.schedule_filename, 337 writeback=True) 338 
            entries = self._store.setdefault("entries", {})
        except Exception, exc:
            # Corrupted shelve file: delete it and start from scratch.
            self.logger.error("Removing corrupted schedule file %r: %r",
                              self.schedule_filename, exc, exc_info=True)
            self._remove_db()
            self._store = self.persistence.open(self.schedule_filename,
                                                writeback=True)
        else:
            if "__version__" not in self._store:
                self._store.clear()   # remove schedule at 2.2.2 upgrade.
        entries = self._store.setdefault("entries", {})
        self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
        self.install_default_entries(self.schedule)
        self._store["__version__"] = __version__
        self.sync()
        self.logger.debug("Current schedule:\n" +
                          "\n".join(repr(entry)
                                        for entry in entries.itervalues()))

    def get_schedule(self):
        return self._store["entries"]

    def sync(self):
        if self._store is not None:
            self._store.sync()

    def close(self):
        self.sync()
        self._store.close()

    @property
    def info(self):
        return " . db -> %s" % (self.schedule_filename, )


class Service(object):
    # Default scheduler class; overridable via the constructor.
    scheduler_cls = PersistentScheduler

    def __init__(self, logger=None, max_interval=None, schedule_filename=None,
            scheduler_cls=None, app=None):
        app = self.app = app_or_default(app)
        self.max_interval = max_interval or \
                            app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
        self.scheduler_cls = scheduler_cls or self.scheduler_cls
        self.logger = logger or app.log.get_default_logger(name="celery.beat")
        self.schedule_filename = schedule_filename or \
                                    app.conf.CELERYBEAT_SCHEDULE_FILENAME

        self._is_shutdown = threading.Event()
        self._is_stopped = threading.Event()
        # Throttle the periodic wake-up log line so it doesn't flood logs.
        self.debug = SilenceRepeated(self.logger.debug,
                                     10 if self.max_interval < 60 else 1)

    def start(self, embedded_process=False):
        self.logger.info("Celerybeat: Starting...")
        self.logger.debug("Celerybeat: Ticking with max interval->%s",
                          humanize_seconds(self.scheduler.max_interval))

signals.beat_init.send(sender=self) 397 if embedded_process: 398 signals.beat_embedded_init.send(sender=self) 399 platforms.set_process_title("celerybeat") 400 401 try: 402 while not self._is_shutdown.isSet(): 403 interval = self.scheduler.tick() 404 self.debug("Celerybeat: Waking up %s." % ( 405 humanize_seconds(interval, prefix="in "))) 406 time.sleep(interval) 407 except (KeyboardInterrupt, SystemExit): 408 self._is_shutdown.set() 409 finally: 410 self.sync() 411 412 def sync(self): 413 self.scheduler.close() 414 self._is_stopped.set() 415 416 def stop(self, wait=False): 417 self.logger.info("Celerybeat: Shutting down...") 418 self._is_shutdown.set() 419 wait and self._is_stopped.wait() # block until shutdown done. 420 421 def get_scheduler(self, lazy=False): 422 filename = self.schedule_filename 423 scheduler = instantiate(self.scheduler_cls, 424 app=self.app, 425 schedule_filename=filename, 426 logger=self.logger, 427 max_interval=self.max_interval, 428 lazy=lazy) 429 return scheduler 430 431 @cached_property 432 def scheduler(self): 433 return self.get_scheduler() 434 435 436 class _Threaded(threading.Thread): 437 """Embedded task scheduler using threading.""" 438 439 def __init__(self, *args, **kwargs): 440 super(_Threaded, self).__init__() 441 self.service = Service(*args, **kwargs) 442 self.setDaemon(True) 443 self.setName("Beat") 444 445 def run(self): 446 self.service.start() 447 448 def stop(self): 449 self.service.stop(wait=True) 450 451 452 if multiprocessing is not None: 453 454 class _Process(multiprocessing.Process): 455 """Embedded task scheduler using multiprocessing.""" 456 457 def __init__(self, *args, **kwargs): 458 super(_Process, self).__init__() 459 self.service = Service(*args, **kwargs) 460 self.name = "Beat" 461 462 def run(self): 463 platforms.signals.reset("SIGTERM") 464 self.service.start(embedded_process=True) 465 466 def stop(self): 467 self.service.stop() 468 self.terminate() 469 else: 470 _Process = None 471 472 473 def 
EmbeddedService(*args, **kwargs): 474 """Return embedded clock service. 475 476 :keyword thread: Run threaded instead of as a separate process. 477 Default is :const:`False`. 478 479 """ 480 if kwargs.pop("thread", False) or _Process is None: 481 # Need short max interval to be able to stop thread 482 # in reasonable time. 483 kwargs.setdefault("max_interval", 1) 484 return _Threaded(*args, **kwargs) 485 486 return _Process(*args, **kwargs) 487 [end of celery/beat.py] [start of celery/bin/base.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 import os 5 import sys 6 import warnings 7 8 from optparse import OptionParser, make_option as Option 9 10 from .. import __version__, Celery 11 from ..exceptions import CDeprecationWarning, CPendingDeprecationWarning 12 13 14 # always enable DeprecationWarnings, so our users can see them. 15 for warning in (CDeprecationWarning, CPendingDeprecationWarning): 16 warnings.simplefilter("once", warning, 0) 17 18 19 class Command(object): 20 """Base class for command line applications. 21 22 :keyword app: The current app. 23 :keyword get_app: Callable returning the current app if no app provided. 24 25 """ 26 _default_broker_url = r'amqp://guest:guest@localhost:5672//' 27 #: Arg list used in help. 28 args = '' 29 30 #: Application version. 31 version = __version__ 32 33 #: If false the parser will raise an exception if positional 34 #: args are provided. 35 supports_args = True 36 37 #: List of options (without preload options). 38 option_list = () 39 40 #: List of options to parse before parsing other options. 41 preload_options = ( 42 Option("--app", 43 default=None, action="store", dest="app", 44 help="Name of the app instance to use. "), 45 Option("-b", "--broker", 46 default=None, action="store", dest="broker", 47 help="Broker URL. Default is %s" % ( 48 _default_broker_url, )), 49 Option("--loader", 50 default=None, action="store", dest="loader", 51 help="Name of the loader class to use. 
" 52 "Taken from the environment variable CELERY_LOADER, " 53 "or 'default' if that is not set."), 54 Option("--config", 55 default="celeryconfig", action="store", 56 dest="config_module", 57 help="Name of the module to read configuration from."), 58 ) 59 60 #: Enable if the application should support config from the cmdline. 61 enable_config_from_cmdline = False 62 63 #: Default configuration namespace. 64 namespace = "celery" 65 66 Parser = OptionParser 67 68 def __init__(self, app=None, get_app=None): 69 self.app = app 70 self.get_app = get_app or self._get_default_app 71 72 def run(self, *args, **options): 73 """This is the body of the command called by :meth:`handle_argv`.""" 74 raise NotImplementedError("subclass responsibility") 75 76 def execute_from_commandline(self, argv=None): 77 """Execute application from command line. 78 79 :keyword argv: The list of command line arguments. 80 Defaults to ``sys.argv``. 81 82 """ 83 if argv is None: 84 argv = list(sys.argv) 85 argv = self.setup_app_from_commandline(argv) 86 prog_name = os.path.basename(argv[0]) 87 return self.handle_argv(prog_name, argv[1:]) 88 89 def usage(self): 90 """Returns the command-line usage string for this app.""" 91 return "%%prog [options] %s" % (self.args, ) 92 93 def get_options(self): 94 """Get supported command line options.""" 95 return self.option_list 96 97 def handle_argv(self, prog_name, argv): 98 """Parses command line arguments from ``argv`` and dispatches 99 to :meth:`run`. 100 101 :param prog_name: The program name (``argv[0]``). 102 :param argv: Command arguments. 103 104 Exits with an error message if :attr:`supports_args` is disabled 105 and ``argv`` contains positional arguments. 
106 107 """ 108 options, args = self.parse_options(prog_name, argv) 109 if not self.supports_args and args: 110 sys.stderr.write( 111 "\nUnrecognized command line arguments: %s\n" % ( 112 ", ".join(args), )) 113 sys.stderr.write("\nTry --help?\n") 114 sys.exit(1) 115 return self.run(*args, **vars(options)) 116 117 def parse_options(self, prog_name, arguments): 118 """Parse the available options.""" 119 # Don't want to load configuration to just print the version, 120 # so we handle --version manually here. 121 if "--version" in arguments: 122 print(self.version) 123 sys.exit(0) 124 parser = self.create_parser(prog_name) 125 options, args = parser.parse_args(arguments) 126 return options, args 127 128 def create_parser(self, prog_name): 129 return self.Parser(prog=prog_name, 130 usage=self.usage(), 131 version=self.version, 132 option_list=(self.preload_options + 133 self.get_options())) 134 135 def prepare_preload_options(self, options): 136 """Optional handler to do additional processing of preload options. 137 138 Configuration must not have been initialized 139 until after this is called. 
140 141 """ 142 pass 143 144 def setup_app_from_commandline(self, argv): 145 preload_options = self.parse_preload_options(argv) 146 self.prepare_preload_options(preload_options) 147 app = (preload_options.get("app") or 148 os.environ.get("CELERY_APP") or 149 self.app) 150 loader = (preload_options.get("loader") or 151 os.environ.get("CELERY_LOADER") or 152 "default") 153 broker = preload_options.get("broker", None) 154 if broker: 155 os.environ["CELERY_BROKER_URL"] = broker 156 config_module = preload_options.get("config_module") 157 if config_module: 158 os.environ["CELERY_CONFIG_MODULE"] = config_module 159 if app: 160 self.app = self.get_cls_by_name(app) 161 else: 162 self.app = self.get_app(loader=loader) 163 if self.enable_config_from_cmdline: 164 argv = self.process_cmdline_config(argv) 165 return argv 166 167 def get_cls_by_name(self, name): 168 from ..utils import get_cls_by_name, import_from_cwd 169 return get_cls_by_name(name, imp=import_from_cwd) 170 171 def process_cmdline_config(self, argv): 172 try: 173 cargs_start = argv.index('--') 174 except ValueError: 175 return argv 176 argv, cargs = argv[:cargs_start], argv[cargs_start + 1:] 177 self.app.config_from_cmdline(cargs, namespace=self.namespace) 178 return argv 179 180 def parse_preload_options(self, args): 181 acc = {} 182 opts = {} 183 for opt in self.preload_options: 184 for t in (opt._long_opts, opt._short_opts): 185 opts.update(dict(zip(t, [opt.dest] * len(t)))) 186 index = 0 187 length = len(args) 188 while index < length: 189 arg = args[index] 190 if arg.startswith('--') and '=' in arg: 191 key, value = arg.split('=', 1) 192 dest = opts.get(key) 193 if dest: 194 acc[dest] = value 195 elif arg.startswith('-'): 196 dest = opts.get(arg) 197 if dest: 198 acc[dest] = args[index + 1] 199 index += 1 200 index += 1 201 return acc 202 203 def _get_default_app(self, *args, **kwargs): 204 return Celery(*args, **kwargs) 205 206 207 def daemon_options(default_pidfile=None, default_logfile=None): 208 return 
( 209 Option('-f', '--logfile', default=default_logfile, 210 action="store", dest="logfile", 211 help="Path to the logfile"), 212 Option('--pidfile', default=default_pidfile, 213 action="store", dest="pidfile", 214 help="Path to the pidfile."), 215 Option('--uid', default=None, 216 action="store", dest="uid", 217 help="Effective user id to run as when detached."), 218 Option('--gid', default=None, 219 action="store", dest="gid", 220 help="Effective group id to run as when detached."), 221 Option('--umask', default=0, 222 action="store", type="int", dest="umask", 223 help="Umask of the process when detached."), 224 Option('--workdir', default=None, 225 action="store", dest="working_directory", 226 help="Directory to change to when detached."), 227 ) 228 [end of celery/bin/base.py] [start of celery/bin/celerybeat.py] 1 # -*- coding: utf-8 -*- 2 """celerybeat 3 4 .. program:: celerybeat 5 6 .. cmdoption:: -s, --schedule 7 8 Path to the schedule database. Defaults to `celerybeat-schedule`. 9 The extension ".db" will be appended to the filename. 10 11 .. cmdoption:: -S, --scheduler 12 13 Scheduler class to use. Default is celery.beat.PersistentScheduler 14 15 .. cmdoption:: -f, --logfile 16 17 Path to log file. If no logfile is specified, `stderr` is used. 18 19 .. cmdoption:: -l, --loglevel 20 21 Logging level, choose between `DEBUG`, `INFO`, `WARNING`, 22 `ERROR`, `CRITICAL`, or `FATAL`. 
23 24 """ 25 from __future__ import with_statement 26 from __future__ import absolute_import 27 28 import os 29 30 from functools import partial 31 32 from celery.platforms import detached 33 34 from celery.bin.base import Command, Option, daemon_options 35 36 37 class BeatCommand(Command): 38 supports_args = False 39 preload_options = (Command.preload_options 40 + daemon_options(default_pidfile="celerybeat.pid")) 41 42 def run(self, detach=False, logfile=None, pidfile=None, uid=None, 43 gid=None, umask=None, working_directory=None, **kwargs): 44 workdir = working_directory 45 kwargs.pop("app", None) 46 beat = partial(self.app.Beat, 47 logfile=logfile, pidfile=pidfile, **kwargs) 48 49 if detach: 50 with detached(logfile, pidfile, uid, gid, umask, workdir): 51 return beat().run() 52 else: 53 return beat().run() 54 55 def prepare_preload_options(self, options): 56 workdir = options.get("working_directory") 57 if workdir: 58 os.chdir(workdir) 59 60 def get_options(self): 61 conf = self.app.conf 62 63 return ( 64 Option('--detach', 65 default=False, action="store_true", dest="detach", 66 help="Detach and run in the background."), 67 Option('-s', '--schedule', 68 default=conf.CELERYBEAT_SCHEDULE_FILENAME, 69 action="store", dest="schedule", 70 help="Path to the schedule database. The extension " 71 "'.db' will be appended to the filename. Default: %s" % ( 72 conf.CELERYBEAT_SCHEDULE_FILENAME, )), 73 Option('--max-interval', 74 default=None, type="float", dest="max_interval", 75 help="Max. seconds to sleep between schedule iterations."), 76 Option('-S', '--scheduler', 77 default=None, 78 action="store", dest="scheduler_cls", 79 help="Scheduler class. Default is " 80 "celery.beat.PersistentScheduler"), 81 Option('-l', '--loglevel', 82 default=conf.CELERYBEAT_LOG_LEVEL, 83 action="store", dest="loglevel", 84 help="Loglevel. 
One of DEBUG/INFO/WARNING/ERROR/CRITICAL.")) 85 86 87 def main(): 88 beat = BeatCommand() 89 beat.execute_from_commandline() 90 91 if __name__ == "__main__": # pragma: no cover 92 main() 93 [end of celery/bin/celerybeat.py] [start of celery/bin/celeryd.py] 1 # -*- coding: utf-8 -*- 2 """celeryd 3 4 .. program:: celeryd 5 6 .. cmdoption:: -c, --concurrency 7 8 Number of child processes processing the queue. The default 9 is the number of CPUs available on your system. 10 11 .. cmdoption:: -f, --logfile 12 13 Path to log file. If no logfile is specified, `stderr` is used. 14 15 .. cmdoption:: -l, --loglevel 16 17 Logging level, choose between `DEBUG`, `INFO`, `WARNING`, 18 `ERROR`, `CRITICAL`, or `FATAL`. 19 20 .. cmdoption:: -n, --hostname 21 22 Set custom hostname. 23 24 .. cmdoption:: -B, --beat 25 26 Also run the `celerybeat` periodic task scheduler. Please note that 27 there must only be one instance of this service. 28 29 .. cmdoption:: -Q, --queues 30 31 List of queues to enable for this worker, separated by comma. 32 By default all configured queues are enabled. 33 Example: `-Q video,image` 34 35 .. cmdoption:: -I, --include 36 37 Comma separated list of additional modules to import. 38 Example: -I foo.tasks,bar.tasks 39 40 .. cmdoption:: -s, --schedule 41 42 Path to the schedule database if running with the `-B` option. 43 Defaults to `celerybeat-schedule`. The extension ".db" will be 44 appended to the filename. 45 46 .. cmdoption:: --scheduler 47 48 Scheduler class to use. Default is celery.beat.PersistentScheduler 49 50 .. cmdoption:: -E, --events 51 52 Send events that can be captured by monitors like `celerymon`. 53 54 .. cmdoption:: --purge, --discard 55 56 Discard all waiting tasks before the daemon is started. 57 **WARNING**: This is unrecoverable, and the tasks will be 58 deleted from the messaging server. 59 60 .. cmdoption:: --time-limit 61 62 Enables a hard time limit (in seconds) for tasks. 63 64 .. 
cmdoption:: --soft-time-limit 65 66 Enables a soft time limit (in seconds) for tasks. 67 68 .. cmdoption:: --maxtasksperchild 69 70 Maximum number of tasks a pool worker can execute before it's 71 terminated and replaced by a new worker. 72 73 """ 74 from __future__ import absolute_import 75 76 import sys 77 78 try: 79 from multiprocessing import freeze_support 80 except ImportError: # pragma: no cover 81 freeze_support = lambda: True # noqa 82 83 from celery.bin.base import Command, Option 84 85 86 class WorkerCommand(Command): 87 namespace = "celeryd" 88 enable_config_from_cmdline = True 89 supports_args = False 90 91 def run(self, *args, **kwargs): 92 kwargs.pop("app", None) 93 # Pools like eventlet/gevent needs to patch libs as early 94 # as possible. 95 from celery import concurrency 96 kwargs["pool"] = concurrency.get_implementation( 97 kwargs.get("pool") or self.app.conf.CELERYD_POOL) 98 return self.app.Worker(**kwargs).run() 99 100 def get_options(self): 101 conf = self.app.conf 102 return ( 103 Option('-c', '--concurrency', 104 default=conf.CELERYD_CONCURRENCY, 105 action="store", dest="concurrency", type="int", 106 help="Number of worker threads/processes"), 107 Option('-P', '--pool', 108 default=conf.CELERYD_POOL, 109 action="store", dest="pool", type="str", 110 help="Pool implementation: " 111 "processes (default), eventlet, gevent, " 112 "solo or threads."), 113 Option('--purge', '--discard', default=False, 114 action="store_true", dest="discard", 115 help="Discard all waiting tasks before the server is" 116 "started. 
WARNING: There is no undo operation " 117 "and the tasks will be deleted."), 118 Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE, 119 action="store", dest="logfile", 120 help="Path to log file."), 121 Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL, 122 action="store", dest="loglevel", 123 help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL"), 124 Option('-n', '--hostname', default=None, 125 action="store", dest="hostname", 126 help="Set custom host name. E.g. 'foo.example.com'."), 127 Option('-B', '--beat', default=False, 128 action="store_true", dest="run_clockservice", 129 help="Also run the celerybeat periodic task scheduler. " 130 "NOTE: Only one instance of celerybeat must be" 131 "running at any one time."), 132 Option('-s', '--schedule', 133 default=conf.CELERYBEAT_SCHEDULE_FILENAME, 134 action="store", dest="schedule", 135 help="Path to the schedule database if running with the -B " 136 "option. The extension '.db' will be appended to the " 137 "filename. Default: %s" % ( 138 conf.CELERYBEAT_SCHEDULE_FILENAME, )), 139 Option('--scheduler', 140 default=None, 141 action="store", dest="scheduler_cls", 142 help="Scheduler class. Default is " 143 "celery.beat.PersistentScheduler"), 144 Option('-S', '--statedb', default=conf.CELERYD_STATE_DB, 145 action="store", dest="db", 146 help="Path to the state database. The extension '.db' will " 147 "be appended to the filename. 
Default: %s" % ( 148 conf.CELERYD_STATE_DB, )), 149 Option('-E', '--events', default=conf.CELERY_SEND_EVENTS, 150 action="store_true", dest="events", 151 help="Send events so the worker can be monitored by " 152 "celeryev, celerymon and other monitors.."), 153 Option('--time-limit', 154 default=conf.CELERYD_TASK_TIME_LIMIT, 155 action="store", type="int", dest="task_time_limit", 156 help="Enables a hard time limit (in seconds) for tasks."), 157 Option('--soft-time-limit', 158 default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, 159 action="store", type="int", dest="task_soft_time_limit", 160 help="Enables a soft time limit (in seconds) for tasks."), 161 Option('--maxtasksperchild', 162 default=conf.CELERYD_MAX_TASKS_PER_CHILD, 163 action="store", type="int", dest="max_tasks_per_child", 164 help="Maximum number of tasks a pool worker can execute" 165 "before it's terminated and replaced by a new worker."), 166 Option('--queues', '-Q', default=[], 167 action="store", dest="queues", 168 help="Comma separated list of queues to consume from. " 169 "By default all configured queues are used. " 170 "Example: -Q video,image"), 171 Option('--include', '-I', default=[], 172 action="store", dest="include", 173 help="Comma separated list of additional modules to import. " 174 "Example: -I foo.tasks,bar.tasks"), 175 Option('--pidfile', default=None, 176 help="Optional file used to store the workers pid. " 177 "The worker will not start if this file already exists " 178 "and the pid is still alive."), 179 Option('--autoscale', default=None, 180 help="Enable autoscaling by providing " 181 "max_concurrency,min_concurrency. Example: " 182 "--autoscale=10,3 (always keep 3 processes, " 183 "but grow to 10 if necessary)."), 184 ) 185 186 187 def main(): 188 freeze_support() 189 worker = WorkerCommand() 190 worker.execute_from_commandline() 191 192 193 def windows_main(): 194 sys.stderr.write(""" 195 196 The celeryd command does not work on Windows. 
197 198 Instead, please use: 199 200 ..> python -m celery.bin.celeryd 201 202 You can also supply arguments: 203 204 ..> python -m celery.bin.celeryd --concurrency=10 --loglevel=DEBUG 205 206 207 """.strip()) 208 209 210 if __name__ == "__main__": # pragma: no cover 211 main() 212 [end of celery/bin/celeryd.py] [start of celery/concurrency/__init__.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 from ..utils import get_cls_by_name 5 6 ALIASES = { 7 "processes": "celery.concurrency.processes.TaskPool", 8 "eventlet": "celery.concurrency.eventlet.TaskPool", 9 "gevent": "celery.concurrency.gevent.TaskPool", 10 "threads": "celery.concurrency.threads.TaskPool", 11 "solo": "celery.concurrency.solo.TaskPool", 12 } 13 14 15 def get_implementation(cls): 16 return get_cls_by_name(cls, ALIASES) 17 [end of celery/concurrency/__init__.py] [start of celery/concurrency/base.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 import logging 5 import os 6 import sys 7 import time 8 import traceback 9 10 from functools import partial 11 12 from .. 
import log 13 from ..datastructures import ExceptionInfo 14 from ..utils import timer2 15 from ..utils.encoding import safe_repr 16 17 18 def apply_target(target, args=(), kwargs={}, callback=None, 19 accept_callback=None, pid=None): 20 if accept_callback: 21 accept_callback(pid or os.getpid(), time.time()) 22 callback(target(*args, **kwargs)) 23 24 25 class BasePool(object): 26 RUN = 0x1 27 CLOSE = 0x2 28 TERMINATE = 0x3 29 30 Timer = timer2.Timer 31 32 signal_safe = True 33 rlimit_safe = True 34 is_green = False 35 36 _state = None 37 _pool = None 38 39 def __init__(self, limit=None, putlocks=True, logger=None, **options): 40 self.limit = limit 41 self.putlocks = putlocks 42 self.logger = logger or log.get_default_logger() 43 self.options = options 44 self.does_debug = self.logger.isEnabledFor(logging.DEBUG) 45 46 def on_start(self): 47 pass 48 49 def on_stop(self): 50 pass 51 52 def on_apply(self, *args, **kwargs): 53 pass 54 55 def on_terminate(self): 56 pass 57 58 def terminate_job(self, pid): 59 raise NotImplementedError( 60 "%s does not implement kill_job" % (self.__class__, )) 61 62 def stop(self): 63 self._state = self.CLOSE 64 self.on_stop() 65 self._state = self.TERMINATE 66 67 def terminate(self): 68 self._state = self.TERMINATE 69 self.on_terminate() 70 71 def start(self): 72 self.on_start() 73 self._state = self.RUN 74 75 def apply_async(self, target, args=None, kwargs=None, callback=None, 76 errback=None, accept_callback=None, timeout_callback=None, 77 soft_timeout=None, timeout=None, **compat): 78 """Equivalent of the :func:`apply` built-in function. 79 80 Callbacks should optimally return as soon as possible ince 81 otherwise the thread which handles the result will get blocked. 
82 83 """ 84 args = args or [] 85 kwargs = kwargs or {} 86 87 on_ready = partial(self.on_ready, callback, errback) 88 on_worker_error = partial(self.on_worker_error, errback) 89 90 if self.does_debug: 91 self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)", 92 target, safe_repr(args), safe_repr(kwargs)) 93 94 return self.on_apply(target, args, kwargs, 95 callback=on_ready, 96 accept_callback=accept_callback, 97 timeout_callback=timeout_callback, 98 error_callback=on_worker_error, 99 waitforslot=self.putlocks, 100 soft_timeout=soft_timeout, 101 timeout=timeout) 102 103 def on_ready(self, callback, errback, ret_value): 104 """What to do when a worker task is ready and its return value has 105 been collected.""" 106 107 if isinstance(ret_value, ExceptionInfo): 108 if isinstance(ret_value.exception, ( 109 SystemExit, KeyboardInterrupt)): 110 raise ret_value.exception 111 self.safe_apply_callback(errback, ret_value) 112 else: 113 self.safe_apply_callback(callback, ret_value) 114 115 def on_worker_error(self, errback, exc_info): 116 errback(exc_info) 117 118 def safe_apply_callback(self, fun, *args): 119 if fun: 120 try: 121 fun(*args) 122 except BaseException: 123 self.logger.error("Pool callback raised exception: %s", 124 traceback.format_exc(), 125 exc_info=sys.exc_info()) 126 127 def _get_info(self): 128 return {} 129 130 @property 131 def info(self): 132 return self._get_info() 133 134 @property 135 def active(self): 136 return self._state == self.RUN 137 138 @property 139 def num_processes(self): 140 return self.limit 141 [end of celery/concurrency/base.py] [start of celery/concurrency/processes/__init__.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import absolute_import 3 4 import platform 5 import signal as _signal 6 7 from ..base import BasePool 8 from .pool import Pool, RUN 9 10 if platform.system() == "Windows": # pragma: no cover 11 # On Windows os.kill calls TerminateProcess which cannot be 12 # handled by # any process, so this is needed to 
terminate the task 13 # *and its children* (if any). 14 from ._win import kill_processtree as _kill # noqa 15 else: 16 from os import kill as _kill # noqa 17 18 19 class TaskPool(BasePool): 20 """Process Pool for processing tasks in parallel. 21 22 :param processes: see :attr:`processes`. 23 :param logger: see :attr:`logger`. 24 25 26 .. attribute:: limit 27 28 The number of processes that can run simultaneously. 29 30 .. attribute:: logger 31 32 The logger used for debugging. 33 34 """ 35 Pool = Pool 36 37 def on_start(self): 38 """Run the task pool. 39 40 Will pre-fork all workers so they're ready to accept tasks. 41 42 """ 43 self._pool = self.Pool(processes=self.limit, **self.options) 44 self.on_apply = self._pool.apply_async 45 46 def on_stop(self): 47 """Gracefully stop the pool.""" 48 if self._pool is not None and self._pool._state == RUN: 49 self._pool.close() 50 self._pool.join() 51 self._pool = None 52 53 def on_terminate(self): 54 """Force terminate the pool.""" 55 if self._pool is not None: 56 self._pool.terminate() 57 self._pool = None 58 59 def terminate_job(self, pid, signal=None): 60 _kill(pid, signal or _signal.SIGTERM) 61 62 def grow(self, n=1): 63 return self._pool.grow(n) 64 65 def shrink(self, n=1): 66 return self._pool.shrink(n) 67 68 def _get_info(self): 69 return {"max-concurrency": self.limit, 70 "processes": [p.pid for p in self._pool._pool], 71 "max-tasks-per-child": self._pool._maxtasksperchild, 72 "put-guarded-by-semaphore": self.putlocks, 73 "timeouts": (self._pool.soft_timeout, self._pool.timeout)} 74 75 @property 76 def num_processes(self): 77 return self._pool._processes 78 [end of celery/concurrency/processes/__init__.py] [start of celery/concurrency/processes/pool.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Module providing the `Pool` class for managing a process pool 4 # 5 # multiprocessing/pool.py 6 # 7 # Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt 8 # 9 from __future__ import absolute_import 10 11 # 12 # Imports 13 # 14 15 
import os 16 import sys 17 import errno 18 import threading 19 import Queue 20 import itertools 21 import collections 22 import time 23 import signal 24 import warnings 25 import logging 26 27 from multiprocessing import Process, cpu_count, TimeoutError 28 from multiprocessing import util 29 from multiprocessing.util import Finalize, debug 30 31 from celery.datastructures import ExceptionInfo 32 from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded 33 from celery.exceptions import WorkerLostError 34 35 _Semaphore = threading._Semaphore 36 37 # 38 # Constants representing the state of a pool 39 # 40 41 RUN = 0 42 CLOSE = 1 43 TERMINATE = 2 44 45 # 46 # Constants representing the state of a job 47 # 48 49 ACK = 0 50 READY = 1 51 52 # Signal used for soft time limits. 53 SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None) 54 55 # 56 # Miscellaneous 57 # 58 59 job_counter = itertools.count() 60 61 62 def mapstar(args): 63 return map(*args) 64 65 66 def error(msg, *args, **kwargs): 67 if util._logger: 68 util._logger.error(msg, *args, **kwargs) 69 70 71 class LaxBoundedSemaphore(threading._Semaphore): 72 """Semaphore that checks that # release is <= # acquires, 73 but ignores if # releases >= value.""" 74 75 def __init__(self, value=1, verbose=None): 76 _Semaphore.__init__(self, value, verbose) 77 self._initial_value = value 78 79 if sys.version_info >= (3, 0): 80 81 def release(self): 82 if self._value < self._initial_value: 83 _Semaphore.release(self) 84 if __debug__: 85 self._note("%s.release: success, value=%s (unchanged)" % ( 86 self, self._value)) 87 88 def clear(self): 89 while self._value < self._initial_value: 90 _Semaphore.release(self) 91 else: 92 93 def release(self): # noqa 94 if self._Semaphore__value < self._initial_value: 95 _Semaphore.release(self) 96 if __debug__: 97 self._note("%s.release: success, value=%s (unchanged)" % ( 98 self, self._Semaphore__value)) 99 100 def clear(self): # noqa 101 while self._Semaphore__value < 
self._initial_value: 102 _Semaphore.release(self) 103 104 # 105 # Exceptions 106 # 107 108 109 class MaybeEncodingError(Exception): 110 """Wraps unpickleable object.""" 111 112 def __init__(self, exc, value): 113 self.exc = str(exc) 114 self.value = repr(value) 115 Exception.__init__(self, self.exc, self.value) 116 117 def __repr__(self): 118 return "<MaybeEncodingError: %s>" % str(self) 119 120 def __str__(self): 121 return "Error sending result: '%s'. Reason: '%s'." % ( 122 self.value, self.exc) 123 124 125 class WorkersJoined(Exception): 126 """All workers have terminated.""" 127 128 129 def soft_timeout_sighandler(signum, frame): 130 raise SoftTimeLimitExceeded() 131 132 # 133 # Code run by worker processes 134 # 135 136 137 def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None): 138 # Re-init logging system. 139 # Workaround for http://bugs.python.org/issue6721#msg140215 140 # Python logging module uses RLock() objects which are broken after 141 # fork. This can result in a deadlock (Issue #496). 
142 logger_names = logging.Logger.manager.loggerDict.keys() 143 logger_names.append(None) # for root logger 144 for name in logger_names: 145 for handler in logging.getLogger(name).handlers: 146 handler.createLock() 147 logging._lock = threading.RLock() 148 149 pid = os.getpid() 150 assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) 151 put = outqueue.put 152 get = inqueue.get 153 154 if hasattr(inqueue, '_reader'): 155 156 def poll(timeout): 157 if inqueue._reader.poll(timeout): 158 return True, get() 159 return False, None 160 else: 161 162 def poll(timeout): # noqa 163 try: 164 return True, get(timeout=timeout) 165 except Queue.Empty: 166 return False, None 167 168 if hasattr(inqueue, '_writer'): 169 inqueue._writer.close() 170 outqueue._reader.close() 171 172 if initializer is not None: 173 initializer(*initargs) 174 175 if SIG_SOFT_TIMEOUT is not None: 176 signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) 177 178 completed = 0 179 while maxtasks is None or (maxtasks and completed < maxtasks): 180 try: 181 ready, task = poll(1.0) 182 if not ready: 183 continue 184 except (EOFError, IOError): 185 debug('worker got EOFError or IOError -- exiting') 186 break 187 188 if task is None: 189 debug('worker got sentinel -- exiting') 190 break 191 192 job, i, func, args, kwds = task 193 put((ACK, (job, i, time.time(), pid))) 194 try: 195 result = (True, func(*args, **kwds)) 196 except Exception: 197 result = (False, ExceptionInfo(sys.exc_info())) 198 try: 199 put((READY, (job, i, result))) 200 except Exception, exc: 201 _, _, tb = sys.exc_info() 202 wrapped = MaybeEncodingError(exc, result[1]) 203 einfo = ExceptionInfo((MaybeEncodingError, wrapped, tb)) 204 put((READY, (job, i, (False, einfo)))) 205 206 completed += 1 207 debug('worker exiting after %d tasks' % completed) 208 209 # 210 # Class representing a process pool 211 # 212 213 214 class PoolThread(threading.Thread): 215 216 def __init__(self, *args, **kwargs): 217 
threading.Thread.__init__(self) 218 self._state = RUN 219 self.daemon = True 220 221 def run(self): 222 try: 223 return self.body() 224 except Exception, exc: 225 error("Thread %r crashed: %r" % (self.__class__.__name__, exc, ), 226 exc_info=sys.exc_info()) 227 os._exit(1) 228 229 def terminate(self): 230 self._state = TERMINATE 231 232 def close(self): 233 self._state = CLOSE 234 235 236 class Supervisor(PoolThread): 237 238 def __init__(self, pool): 239 self.pool = pool 240 super(Supervisor, self).__init__() 241 242 def body(self): 243 debug('worker handler starting') 244 while self._state == RUN and self.pool._state == RUN: 245 self.pool._maintain_pool() 246 time.sleep(0.8) 247 debug('worker handler exiting') 248 249 250 class TaskHandler(PoolThread): 251 252 def __init__(self, taskqueue, put, outqueue, pool): 253 self.taskqueue = taskqueue 254 self.put = put 255 self.outqueue = outqueue 256 self.pool = pool 257 super(TaskHandler, self).__init__() 258 259 def body(self): 260 taskqueue = self.taskqueue 261 outqueue = self.outqueue 262 put = self.put 263 pool = self.pool 264 265 for taskseq, set_length in iter(taskqueue.get, None): 266 i = -1 267 for i, task in enumerate(taskseq): 268 if self._state: 269 debug('task handler found thread._state != RUN') 270 break 271 try: 272 put(task) 273 except IOError: 274 debug('could not put task on queue') 275 break 276 else: 277 if set_length: 278 debug('doing set_length()') 279 set_length(i + 1) 280 continue 281 break 282 else: 283 debug('task handler got sentinel') 284 285 try: 286 # tell result handler to finish when cache is empty 287 debug('task handler sending sentinel to result handler') 288 outqueue.put(None) 289 290 # tell workers there is no more work 291 debug('task handler sending sentinel to workers') 292 for p in pool: 293 put(None) 294 except IOError: 295 debug('task handler got IOError when sending sentinels') 296 297 debug('task handler exiting') 298 299 300 class TimeoutHandler(PoolThread): 301 302 def 
__init__(self, processes, cache, t_soft, t_hard): 303 self.processes = processes 304 self.cache = cache 305 self.t_soft = t_soft 306 self.t_hard = t_hard 307 super(TimeoutHandler, self).__init__() 308 309 def body(self): 310 processes = self.processes 311 cache = self.cache 312 t_hard, t_soft = self.t_hard, self.t_soft 313 dirty = set() 314 315 def _process_by_pid(pid): 316 for index, process in enumerate(processes): 317 if process.pid == pid: 318 return process, index 319 return None, None 320 321 def _timed_out(start, timeout): 322 if not start or not timeout: 323 return False 324 if time.time() >= start + timeout: 325 return True 326 327 def _on_soft_timeout(job, i, soft_timeout): 328 debug('soft time limit exceeded for %i' % i) 329 process, _index = _process_by_pid(job._worker_pid) 330 if not process: 331 return 332 333 # Run timeout callback 334 if job._timeout_callback is not None: 335 job._timeout_callback(soft=True, timeout=soft_timeout) 336 337 try: 338 os.kill(job._worker_pid, SIG_SOFT_TIMEOUT) 339 except OSError, exc: 340 if exc.errno == errno.ESRCH: 341 pass 342 else: 343 raise 344 345 dirty.add(i) 346 347 def _on_hard_timeout(job, i, hard_timeout): 348 if job.ready(): 349 return 350 debug('hard time limit exceeded for %i', i) 351 # Remove from cache and set return value to an exception 352 exc_info = None 353 try: 354 raise TimeLimitExceeded(hard_timeout) 355 except TimeLimitExceeded: 356 exc_info = sys.exc_info() 357 job._set(i, (False, ExceptionInfo(exc_info))) 358 359 # Remove from _pool 360 process, _index = _process_by_pid(job._worker_pid) 361 362 # Run timeout callback 363 if job._timeout_callback is not None: 364 job._timeout_callback(soft=False, timeout=hard_timeout) 365 if process: 366 process.terminate() 367 368 # Inner-loop 369 while self._state == RUN: 370 371 # Remove dirty items not in cache anymore 372 if dirty: 373 dirty = set(k for k in dirty if k in cache) 374 375 for i, job in cache.items(): 376 ack_time = job._time_accepted 377 
soft_timeout = job._soft_timeout 378 if soft_timeout is None: 379 soft_timeout = t_soft 380 hard_timeout = job._timeout 381 if hard_timeout is None: 382 hard_timeout = t_hard 383 if _timed_out(ack_time, hard_timeout): 384 _on_hard_timeout(job, i, hard_timeout) 385 elif i not in dirty and _timed_out(ack_time, soft_timeout): 386 _on_soft_timeout(job, i, soft_timeout) 387 388 time.sleep(0.5) # Don't waste CPU cycles. 389 390 debug('timeout handler exiting') 391 392 393 class ResultHandler(PoolThread): 394 395 def __init__(self, outqueue, get, cache, poll, 396 join_exited_workers, putlock): 397 self.outqueue = outqueue 398 self.get = get 399 self.cache = cache 400 self.poll = poll 401 self.join_exited_workers = join_exited_workers 402 self.putlock = putlock 403 super(ResultHandler, self).__init__() 404 405 def body(self): 406 get = self.get 407 outqueue = self.outqueue 408 cache = self.cache 409 poll = self.poll 410 join_exited_workers = self.join_exited_workers 411 putlock = self.putlock 412 413 def on_ack(job, i, time_accepted, pid): 414 try: 415 cache[job]._ack(i, time_accepted, pid) 416 except (KeyError, AttributeError): 417 # Object gone or doesn't support _ack (e.g. IMAPIterator). 
418 pass 419 420 def on_ready(job, i, obj): 421 try: 422 item = cache[job] 423 except KeyError: 424 return 425 if not item.ready(): 426 if putlock is not None: 427 putlock.release() 428 try: 429 item._set(i, obj) 430 except KeyError: 431 pass 432 433 state_handlers = {ACK: on_ack, READY: on_ready} 434 435 def on_state_change(task): 436 state, args = task 437 try: 438 state_handlers[state](*args) 439 except KeyError: 440 debug("Unknown job state: %s (args=%s)" % (state, args)) 441 442 debug('result handler starting') 443 while 1: 444 try: 445 ready, task = poll(1.0) 446 except (IOError, EOFError), exc: 447 debug('result handler got %r -- exiting' % (exc, )) 448 return 449 450 if self._state: 451 assert self._state == TERMINATE 452 debug('result handler found thread._state=TERMINATE') 453 break 454 455 if ready: 456 if task is None: 457 debug('result handler got sentinel') 458 break 459 460 on_state_change(task) 461 462 time_terminate = None 463 while cache and self._state != TERMINATE: 464 try: 465 ready, task = poll(1.0) 466 except (IOError, EOFError), exc: 467 debug('result handler got %r -- exiting' % (exc, )) 468 return 469 470 if ready: 471 if task is None: 472 debug('result handler ignoring extra sentinel') 473 continue 474 475 on_state_change(task) 476 try: 477 join_exited_workers(shutdown=True) 478 except WorkersJoined: 479 now = time.time() 480 if not time_terminate: 481 time_terminate = now 482 else: 483 if now - time_terminate > 5.0: 484 debug('result handler exiting: timed out') 485 break 486 debug('result handler: all workers terminated, ' 487 'timeout in %ss' % ( 488 abs(min(now - time_terminate - 5.0, 0)))) 489 490 if hasattr(outqueue, '_reader'): 491 debug('ensuring that outqueue is not full') 492 # If we don't make room available in outqueue then 493 # attempts to add the sentinel (None) to outqueue may 494 # block. There is guaranteed to be no more than 2 sentinels. 
495 try: 496 for i in range(10): 497 if not outqueue._reader.poll(): 498 break 499 get() 500 except (IOError, EOFError): 501 pass 502 503 debug('result handler exiting: len(cache)=%s, thread._state=%s', 504 len(cache), self._state) 505 506 507 class Pool(object): 508 ''' 509 Class which supports an async version of the `apply()` builtin 510 ''' 511 Process = Process 512 Supervisor = Supervisor 513 TaskHandler = TaskHandler 514 TimeoutHandler = TimeoutHandler 515 ResultHandler = ResultHandler 516 SoftTimeLimitExceeded = SoftTimeLimitExceeded 517 518 def __init__(self, processes=None, initializer=None, initargs=(), 519 maxtasksperchild=None, timeout=None, soft_timeout=None): 520 self._setup_queues() 521 self._taskqueue = Queue.Queue() 522 self._cache = {} 523 self._state = RUN 524 self.timeout = timeout 525 self.soft_timeout = soft_timeout 526 self._maxtasksperchild = maxtasksperchild 527 self._initializer = initializer 528 self._initargs = initargs 529 530 if soft_timeout and SIG_SOFT_TIMEOUT is None: 531 warnings.warn(UserWarning("Soft timeouts are not supported: " 532 "on this platform: It does not have the SIGUSR1 signal.")) 533 soft_timeout = None 534 535 if processes is None: 536 try: 537 processes = cpu_count() 538 except NotImplementedError: 539 processes = 1 540 self._processes = processes 541 542 if initializer is not None and not hasattr(initializer, '__call__'): 543 raise TypeError('initializer must be a callable') 544 545 self._pool = [] 546 for i in range(processes): 547 self._create_worker_process() 548 549 self._worker_handler = self.Supervisor(self) 550 self._worker_handler.start() 551 552 self._putlock = LaxBoundedSemaphore(self._processes) 553 self._task_handler = self.TaskHandler(self._taskqueue, 554 self._quick_put, 555 self._outqueue, 556 self._pool) 557 self._task_handler.start() 558 559 # Thread killing timedout jobs. 
560 self._timeout_handler = None 561 self._timeout_handler_mutex = threading.Lock() 562 if self.timeout is not None or self.soft_timeout is not None: 563 self._start_timeout_handler() 564 565 # Thread processing results in the outqueue. 566 self._result_handler = self.ResultHandler(self._outqueue, 567 self._quick_get, self._cache, 568 self._poll_result, 569 self._join_exited_workers, 570 self._putlock) 571 self._result_handler.start() 572 573 self._terminate = Finalize( 574 self, self._terminate_pool, 575 args=(self._taskqueue, self._inqueue, self._outqueue, 576 self._pool, self._worker_handler, self._task_handler, 577 self._result_handler, self._cache, 578 self._timeout_handler), 579 exitpriority=15, 580 ) 581 582 def _create_worker_process(self): 583 w = self.Process( 584 target=worker, 585 args=(self._inqueue, self._outqueue, 586 self._initializer, self._initargs, 587 self._maxtasksperchild), 588 ) 589 self._pool.append(w) 590 w.name = w.name.replace('Process', 'PoolWorker') 591 w.daemon = True 592 w.start() 593 return w 594 595 def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0): 596 """Cleanup after any worker processes which have exited due to 597 reaching their specified lifetime. Returns True if any workers were 598 cleaned up. 599 """ 600 now = None 601 # The worker may have published a result before being terminated, 602 # but we have no way to accurately tell if it did. So we wait for 603 # 10 seconds before we mark the job with WorkerLostError. 
604 for job in [job for job in self._cache.values() 605 if not job.ready() and job._worker_lost]: 606 now = now or time.time() 607 if now - job._worker_lost > lost_worker_timeout: 608 exc_info = None 609 try: 610 raise WorkerLostError("Worker exited prematurely.") 611 except WorkerLostError: 612 exc_info = ExceptionInfo(sys.exc_info()) 613 job._set(None, (False, exc_info)) 614 615 if shutdown and not len(self._pool): 616 raise WorkersJoined() 617 618 cleaned = [] 619 for i in reversed(range(len(self._pool))): 620 worker = self._pool[i] 621 if worker.exitcode is not None: 622 # worker exited 623 debug('Supervisor: cleaning up worker %d' % i) 624 worker.join() 625 debug('Supervisor: worked %d joined' % i) 626 cleaned.append(worker.pid) 627 del self._pool[i] 628 if cleaned: 629 for job in self._cache.values(): 630 for worker_pid in job.worker_pids(): 631 if worker_pid in cleaned and not job.ready(): 632 job._worker_lost = time.time() 633 continue 634 if self._putlock is not None: 635 for worker in cleaned: 636 self._putlock.release() 637 return True 638 return False 639 640 def shrink(self, n=1): 641 for i, worker in enumerate(self._iterinactive()): 642 self._processes -= 1 643 if self._putlock: 644 self._putlock._initial_value -= 1 645 self._putlock.acquire() 646 worker.terminate() 647 if i == n - 1: 648 return 649 raise ValueError("Can't shrink pool. 
All processes busy!") 650 651 def grow(self, n=1): 652 for i in xrange(n): 653 #assert len(self._pool) == self._processes 654 self._processes += 1 655 if self._putlock: 656 cond = self._putlock._Semaphore__cond 657 cond.acquire() 658 try: 659 self._putlock._initial_value += 1 660 self._putlock._Semaphore__value += 1 661 cond.notify() 662 finally: 663 cond.release() 664 665 def _iterinactive(self): 666 for worker in self._pool: 667 if not self._worker_active(worker): 668 yield worker 669 raise StopIteration() 670 671 def _worker_active(self, worker): 672 for job in self._cache.values(): 673 if worker.pid in job.worker_pids(): 674 return True 675 return False 676 677 def _repopulate_pool(self): 678 """Bring the number of pool processes up to the specified number, 679 for use after reaping workers which have exited. 680 """ 681 for i in range(self._processes - len(self._pool)): 682 if self._state != RUN: 683 return 684 self._create_worker_process() 685 debug('added worker') 686 687 def _maintain_pool(self): 688 """"Clean up any exited workers and start replacements for them. 689 """ 690 self._join_exited_workers() 691 self._repopulate_pool() 692 693 def _setup_queues(self): 694 from multiprocessing.queues import SimpleQueue 695 self._inqueue = SimpleQueue() 696 self._outqueue = SimpleQueue() 697 self._quick_put = self._inqueue._writer.send 698 self._quick_get = self._outqueue._reader.recv 699 700 def _poll_result(timeout): 701 if self._outqueue._reader.poll(timeout): 702 return True, self._quick_get() 703 return False, None 704 self._poll_result = _poll_result 705 706 def _start_timeout_handler(self): 707 # ensure more than one thread does not start the timeout handler 708 # thread at once. 
709 self._timeout_handler_mutex.acquire() 710 try: 711 if self._timeout_handler is None: 712 self._timeout_handler = self.TimeoutHandler( 713 self._pool, self._cache, 714 self.soft_timeout, self.timeout) 715 self._timeout_handler.start() 716 finally: 717 self._timeout_handler_mutex.release() 718 719 def apply(self, func, args=(), kwds={}): 720 ''' 721 Equivalent of `apply()` builtin 722 ''' 723 assert self._state == RUN 724 return self.apply_async(func, args, kwds).get() 725 726 def map(self, func, iterable, chunksize=None): 727 ''' 728 Equivalent of `map()` builtin 729 ''' 730 assert self._state == RUN 731 return self.map_async(func, iterable, chunksize).get() 732 733 def imap(self, func, iterable, chunksize=1): 734 ''' 735 Equivalent of `itertools.imap()` -- can be MUCH slower 736 than `Pool.map()` 737 ''' 738 assert self._state == RUN 739 if chunksize == 1: 740 result = IMapIterator(self._cache) 741 self._taskqueue.put((((result._job, i, func, (x,), {}) 742 for i, x in enumerate(iterable)), result._set_length)) 743 return result 744 else: 745 assert chunksize > 1 746 task_batches = Pool._get_tasks(func, iterable, chunksize) 747 result = IMapIterator(self._cache) 748 self._taskqueue.put((((result._job, i, mapstar, (x,), {}) 749 for i, x in enumerate(task_batches)), result._set_length)) 750 return (item for chunk in result for item in chunk) 751 752 def imap_unordered(self, func, iterable, chunksize=1): 753 ''' 754 Like `imap()` method but ordering of results is arbitrary 755 ''' 756 assert self._state == RUN 757 if chunksize == 1: 758 result = IMapUnorderedIterator(self._cache) 759 self._taskqueue.put((((result._job, i, func, (x,), {}) 760 for i, x in enumerate(iterable)), result._set_length)) 761 return result 762 else: 763 assert chunksize > 1 764 task_batches = Pool._get_tasks(func, iterable, chunksize) 765 result = IMapUnorderedIterator(self._cache) 766 self._taskqueue.put((((result._job, i, mapstar, (x,), {}) 767 for i, x in enumerate(task_batches)), 
result._set_length)) 768 return (item for chunk in result for item in chunk) 769 770 def apply_async(self, func, args=(), kwds={}, 771 callback=None, accept_callback=None, timeout_callback=None, 772 waitforslot=False, error_callback=None, 773 soft_timeout=None, timeout=None): 774 ''' 775 Asynchronous equivalent of `apply()` builtin. 776 777 Callback is called when the functions return value is ready. 778 The accept callback is called when the job is accepted to be executed. 779 780 Simplified the flow is like this: 781 782 >>> if accept_callback: 783 ... accept_callback() 784 >>> retval = func(*args, **kwds) 785 >>> if callback: 786 ... callback(retval) 787 788 ''' 789 assert self._state == RUN 790 if soft_timeout and SIG_SOFT_TIMEOUT is None: 791 warnings.warn(UserWarning("Soft timeouts are not supported: " 792 "on this platform: It does not have the SIGUSR1 signal.")) 793 soft_timeout = None 794 if waitforslot and self._putlock is not None and self._state == RUN: 795 self._putlock.acquire() 796 if self._state == RUN: 797 result = ApplyResult(self._cache, callback, 798 accept_callback, timeout_callback, 799 error_callback, soft_timeout, timeout) 800 if timeout or soft_timeout: 801 # start the timeout handler thread when required. 
802 self._start_timeout_handler() 803 self._taskqueue.put(([(result._job, None, 804 func, args, kwds)], None)) 805 return result 806 807 def map_async(self, func, iterable, chunksize=None, callback=None): 808 ''' 809 Asynchronous equivalent of `map()` builtin 810 ''' 811 assert self._state == RUN 812 if not hasattr(iterable, '__len__'): 813 iterable = list(iterable) 814 815 if chunksize is None: 816 chunksize, extra = divmod(len(iterable), len(self._pool) * 4) 817 if extra: 818 chunksize += 1 819 if len(iterable) == 0: 820 chunksize = 0 821 822 task_batches = Pool._get_tasks(func, iterable, chunksize) 823 result = MapResult(self._cache, chunksize, len(iterable), callback) 824 self._taskqueue.put((((result._job, i, mapstar, (x,), {}) 825 for i, x in enumerate(task_batches)), None)) 826 return result 827 828 @staticmethod 829 def _get_tasks(func, it, size): 830 it = iter(it) 831 while 1: 832 x = tuple(itertools.islice(it, size)) 833 if not x: 834 return 835 yield (func, x) 836 837 def __reduce__(self): 838 raise NotImplementedError( 839 'pool objects cannot be passed between ' 840 'processes or pickled') 841 842 def close(self): 843 debug('closing pool') 844 if self._state == RUN: 845 self._state = CLOSE 846 self._worker_handler.close() 847 self._worker_handler.join() 848 self._taskqueue.put(None) 849 if self._putlock: 850 self._putlock.clear() 851 852 def terminate(self): 853 debug('terminating pool') 854 self._state = TERMINATE 855 self._worker_handler.terminate() 856 self._terminate() 857 858 def join(self): 859 assert self._state in (CLOSE, TERMINATE) 860 debug('joining worker handler') 861 self._worker_handler.join() 862 debug('joining task handler') 863 self._task_handler.join() 864 debug('joining result handler') 865 self._result_handler.join() 866 debug('result handler joined') 867 for i, p in enumerate(self._pool): 868 debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, )) 869 p.join() 870 871 @staticmethod 872 def _help_stuff_finish(inqueue, 
task_handler, size): 873 # task_handler may be blocked trying to put items on inqueue 874 debug('removing tasks from inqueue until task handler finished') 875 inqueue._rlock.acquire() 876 while task_handler.is_alive() and inqueue._reader.poll(): 877 inqueue._reader.recv() 878 time.sleep(0) 879 880 @classmethod 881 def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, 882 worker_handler, task_handler, 883 result_handler, cache, timeout_handler): 884 885 # this is guaranteed to only be called once 886 debug('finalizing pool') 887 888 worker_handler.terminate() 889 890 task_handler.terminate() 891 taskqueue.put(None) # sentinel 892 893 debug('helping task handler/workers to finish') 894 cls._help_stuff_finish(inqueue, task_handler, len(pool)) 895 896 result_handler.terminate() 897 outqueue.put(None) # sentinel 898 899 if timeout_handler is not None: 900 timeout_handler.terminate() 901 902 # Terminate workers which haven't already finished 903 if pool and hasattr(pool[0], 'terminate'): 904 debug('terminating workers') 905 for p in pool: 906 if p.exitcode is None: 907 p.terminate() 908 909 debug('joining task handler') 910 task_handler.join(1e100) 911 912 debug('joining result handler') 913 result_handler.join(1e100) 914 915 if timeout_handler is not None: 916 debug('joining timeout handler') 917 timeout_handler.join(1e100) 918 919 if pool and hasattr(pool[0], 'terminate'): 920 debug('joining pool workers') 921 for p in pool: 922 if p.is_alive(): 923 # worker has not yet exited 924 debug('cleaning up worker %d' % p.pid) 925 p.join() 926 debug('pool workers joined') 927 DynamicPool = Pool 928 929 # 930 # Class whose instances are returned by `Pool.apply_async()` 931 # 932 933 934 class ApplyResult(object): 935 _worker_lost = None 936 937 def __init__(self, cache, callback, accept_callback=None, 938 timeout_callback=None, error_callback=None, soft_timeout=None, 939 timeout=None): 940 self._mutex = threading.Lock() 941 self._cond = 
threading.Condition(threading.Lock()) 942 self._job = job_counter.next() 943 self._cache = cache 944 self._ready = False 945 self._callback = callback 946 self._accept_callback = accept_callback 947 self._errback = error_callback 948 self._timeout_callback = timeout_callback 949 self._timeout = timeout 950 self._soft_timeout = soft_timeout 951 952 self._accepted = False 953 self._worker_pid = None 954 self._time_accepted = None 955 cache[self._job] = self 956 957 def ready(self): 958 return self._ready 959 960 def accepted(self): 961 return self._accepted 962 963 def successful(self): 964 assert self._ready 965 return self._success 966 967 def worker_pids(self): 968 return filter(None, [self._worker_pid]) 969 970 def wait(self, timeout=None): 971 self._cond.acquire() 972 try: 973 if not self._ready: 974 self._cond.wait(timeout) 975 finally: 976 self._cond.release() 977 978 def get(self, timeout=None): 979 self.wait(timeout) 980 if not self._ready: 981 raise TimeoutError 982 if self._success: 983 return self._value 984 else: 985 raise self._value 986 987 def _set(self, i, obj): 988 self._mutex.acquire() 989 try: 990 self._success, self._value = obj 991 self._cond.acquire() 992 try: 993 self._ready = True 994 self._cond.notify() 995 finally: 996 self._cond.release() 997 if self._accepted: 998 self._cache.pop(self._job, None) 999 1000 # apply callbacks last 1001 if self._callback and self._success: 1002 self._callback(self._value) 1003 if self._errback and not self._success: 1004 self._errback(self._value) 1005 finally: 1006 self._mutex.release() 1007 1008 def _ack(self, i, time_accepted, pid): 1009 self._mutex.acquire() 1010 try: 1011 self._accepted = True 1012 self._time_accepted = time_accepted 1013 self._worker_pid = pid 1014 if self._ready: 1015 self._cache.pop(self._job, None) 1016 if self._accept_callback: 1017 self._accept_callback(pid, time_accepted) 1018 finally: 1019 self._mutex.release() 1020 1021 # 1022 # Class whose instances are returned by 
`Pool.map_async()`
#


class MapResult(ApplyResult):
    """Result object returned by `Pool.map_async()`.

    Collects the results of all task chunks into a single list and
    becomes ready once every chunk has completed (or one has failed).
    """

    def __init__(self, cache, chunksize, length, callback):
        ApplyResult.__init__(self, cache, callback)
        self._success = True
        self._length = length
        # Per-item bookkeeping; per-chunk acks/results fill ranges of these.
        self._value = [None] * length
        self._accepted = [False] * length
        self._worker_pid = [None] * length
        self._time_accepted = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: nothing to wait for, ready immediately.
            self._number_left = 0
            self._ready = True
        else:
            # Number of chunks, rounding up for a partial final chunk.
            self._number_left = length // chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # Store the (success, value) result of chunk `i`.
        success, result = success_result
        if success:
            self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                # All chunks in: fire the callback and wake up waiters.
                if self._callback:
                    self._callback(self._value)
                # NOTE(review): here `self._accepted` is a *list* (unlike the
                # scalar flag in ApplyResult), so this test is True whenever
                # length > 0 -- presumably `self.accepted()` was intended;
                # confirm before changing.
                if self._accepted:
                    self._cache.pop(self._job, None)
                self._cond.acquire()
                try:
                    self._ready = True
                    self._cond.notify()
                finally:
                    self._cond.release()

        else:
            # One chunk failed: the whole map fails with that exception.
            self._success = False
            self._value = result
            if self._accepted:
                self._cache.pop(self._job, None)
            self._cond.acquire()
            try:
                self._ready = True
                self._cond.notify()
            finally:
                self._cond.release()

    def _ack(self, i, time_accepted, pid):
        # Mark every item in chunk `i` as accepted by worker `pid`.
        start = i * self._chunksize
        stop = (i + 1) * self._chunksize
        # NOTE(review): assumes `stop` never exceeds `length`; a partial
        # final chunk would index out of range here (unlike the forgiving
        # slice assignment in _set) -- confirm the chunking guarantees this.
        for j in range(start, stop):
            self._accepted[j] = True
            self._worker_pid[j] = pid
            self._time_accepted[j] = time_accepted
        if self._ready:
            self._cache.pop(self._job, None)

    def accepted(self):
        # Accepted only once *every* item has been acked.
        return all(self._accepted)

    def worker_pids(self):
        # Drop slots that have not been acked yet (still None).
        return filter(None, self._worker_pid)

#
# Class whose instances are returned by `Pool.imap()`
#


class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = job_counter.next()
        self._cache = cache
        # Results ready for the consumer, in delivery order.
        self._items = collections.deque()
        # Index of the next result expected to be delivered.
        self._index = 0
        # Total number of results; unknown (None) until _set_length().
        self._length = None
        # Results that arrived out of order, keyed by their index.
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next result, blocking up to `timeout` seconds.

        Raises StopIteration when exhausted, TimeoutError if nothing
        arrives in time, and re-raises a failed task's exception.
        """
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                # Retry once after waking; time out if still nothing.
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        # item is a (success, value) pair, as produced by worker().
        success, value = item
        if success:
            return value
        raise value

    __next__ = next  # XXX

    def _set(self, i, obj):
        # Deliver result `i`, buffering out-of-order arrivals so the
        # consumer always sees results in submission order.
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                # Flush buffered successors that are now in order.
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                # All results delivered: drop the cache entry.
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                # Already complete (e.g. empty iterable): wake consumers.
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()

#
# Class whose instances are returned by `Pool.imap_unordered()`
#


class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        # Unordered variant: deliver immediately, no reorder buffering.
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

#
#
#


class ThreadPool(Pool):

    from multiprocessing.dummy import
Process as DummyProcess
    Process = DummyProcess

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Threads share memory, so plain Queue.Queue replaces the pipes
        # used by the process-based pool.
        self._inqueue = Queue.Queue()
        self._outqueue = Queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

        def _poll_result(timeout):
            # Same (ready, task) contract as the pipe-based poll.
            try:
                return True, self._quick_get(timeout=timeout)
            except Queue.Empty:
                return False, None
        self._poll_result = _poll_result

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
[end of celery/concurrency/processes/pool.py]
[start of celery/concurrency/solo.py]
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import os

from .base import BasePool, apply_target


class TaskPool(BasePool):
    """Solo task pool (blocking, inline)."""

    def on_start(self):
        self.pid = os.getpid()

    def on_apply(self, target, args, kwargs, callback=None,
            accept_callback=None, **_):
        # Execute the task inline in the current process (blocking).
        return apply_target(target, args, kwargs,
                            callback, accept_callback, self.pid)

    def _get_info(self):
        return {"max-concurrency": 1,
                "processes": [self.pid],
                "max-tasks-per-child": None,
                "put-guarded-by-semaphore": True,
                "timeouts": ()}
[end of celery/concurrency/solo.py]
[start of celery/datastructures.py]
# -*- coding: utf-8 -*-
"""
celery.datastructures
~~~~~~~~~~~~~~~~~~~~~

Custom types and data structures.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import
from __future__ import with_statement

import sys
import time
import traceback

from itertools import chain
from threading import RLock

from .utils.compat import UserDict, OrderedDict


class AttributeDictMixin(object):
    """Adds attribute access to mappings.

    `d.key -> d[key]`

    """

    def __getattr__(self, key):
        """`d.key -> d[key]`"""
        try:
            return self[key]
        except KeyError:
            # Translate to AttributeError so getattr()/hasattr() behave
            # the same as on a normal object.
            raise AttributeError("'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, key))

    def __setattr__(self, key, value):
        """`d[key] = value -> d.key = value`"""
        self[key] = value


class AttributeDict(dict, AttributeDictMixin):
    """Dict subclass with attribute access."""
    pass


class DictAttribute(object):
    """Dict interface to attributes.

    `obj[k] -> obj.k`

    """

    def __init__(self, obj):
        self.obj = obj

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def __getitem__(self, key):
        try:
            return getattr(self.obj, key)
        except AttributeError:
            # Missing attribute maps to a missing key.
            raise KeyError(key)

    def __setitem__(self, key, value):
        setattr(self.obj, key, value)

    def __contains__(self, key):
        return hasattr(self.obj, key)

    def _iterate_items(self):
        # Only instance attributes (vars()), not class attributes.
        return vars(self.obj).iteritems()
    iteritems = _iterate_items

    if sys.version_info >= (3, 0):
        items = _iterate_items
    else:

        def items(self):
            return list(self._iterate_items())


class ConfigurationView(AttributeDictMixin):
    """A view over an applications configuration dicts.

    If the key does not exist in ``changes``, the ``defaults`` dict
    is consulted.

    :param changes: Dict containing changes to the configuration.
    :param defaults: Dict containing the default configuration.

    """
    changes = None
    defaults = None
    _order = None   # lookup order: changes first, then each defaults dict

    def __init__(self, changes, defaults):
        # Assign via __dict__ directly to bypass our own __setattr__
        # (inherited from AttributeDictMixin, which writes into the view).
        self.__dict__.update(changes=changes, defaults=defaults,
                             _order=[changes] + defaults)

    def __getitem__(self, key):
        # First matching dict in _order wins.
        for d in self._order:
            try:
                return d[key]
            except KeyError:
                pass
        raise KeyError(key)

    def __setitem__(self, key, value):
        # Writes always go to the changes dict, never to defaults.
        self.changes[key] = value

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def update(self, *args, **kwargs):
        return self.changes.update(*args, **kwargs)

    def __contains__(self, key):
        for d in self._order:
            if key in d:
                return True
        return False

    def __repr__(self):
        return repr(dict(self.iteritems()))

    def __iter__(self):
        return self.iterkeys()

    def _iter(self, op):
        # defaults must be first in the stream, so values in
        # changes takes precedence.
        return chain(*[op(d) for d in reversed(self._order)])

    def _iterate_keys(self):
        return self._iter(lambda d: d.iterkeys())
    iterkeys = _iterate_keys

    def _iterate_items(self):
        return self._iter(lambda d: d.iteritems())
    iteritems = _iterate_items

    def _iterate_values(self):
        return self._iter(lambda d: d.itervalues())
    itervalues = _iterate_values

    def keys(self):
        return list(self._iterate_keys())

    def items(self):
        return list(self._iterate_items())

    def values(self):
        return list(self._iterate_values())


class _Code(object):
    # Minimal, copyable stand-in for a code object (filename + name only).

    def __init__(self, code):
        self.co_filename = code.co_filename
        self.co_name = code.co_name


class _Frame(object):
    # Minimal stand-in for a frame object, keeping only what the
    # traceback module needs to format a frame.
    Code = _Code

    def __init__(self, frame):
        self.f_globals = {
            "__file__": frame.f_globals.get("__file__", "__main__"),
        }
        self.f_code = self.Code(frame.f_code)


class Traceback(object):
    # Recursive copy of a traceback using the lightweight frame/code
    # stand-ins above, so it holds no references to live frames.
    Frame = _Frame

    def __init__(self, tb):
        self.tb_frame = self.Frame(tb.tb_frame)
        self.tb_lineno = tb.tb_lineno
        if tb.tb_next is None:
            self.tb_next = None
        else:
            self.tb_next = Traceback(tb.tb_next)


class ExceptionInfo(object):
    """Exception wrapping an exception and its traceback.

    :param exc_info: The exception info tuple as returned by
        :func:`sys.exc_info`.

    """

    #: Exception type.
    type = None

    #: Exception instance.
    exception = None

    #: Pickleable traceback instance for use with :mod:`traceback`
    tb = None

    #: String representation of the traceback.
    traceback = None

    def __init__(self, exc_info):
        self.type, self.exception, tb = exc_info
        # Snapshot the traceback (see Traceback above) and pre-render the
        # formatted text while the live frames are still available.
        self.tb = Traceback(tb)
        self.traceback = ''.join(traceback.format_exception(*exc_info))

    def __str__(self):
        return self.traceback

    def __repr__(self):
        return "<ExceptionInfo: %r>" % (self.exception, )

    @property
    def exc_info(self):
        return self.type, self.exception, self.tb


class LimitedSet(object):
    """Kind-of Set with limitations.

    Good for when you need to test for membership (`a in set`),
    but the list might become to big, so you want to limit it so it doesn't
    consume too much resources.

    :keyword maxlen: Maximum number of members before we start
                     evicting expired members.
    :keyword expires: Time in seconds, before a membership expires.

    """
    __slots__ = ("maxlen", "expires", "_data")

    def __init__(self, maxlen=None, expires=None):
        self.maxlen = maxlen
        self.expires = expires
        self._data = {}   # member -> insertion timestamp

    def add(self, value):
        """Add a new member."""
        # Evict at most one expired member first to honour maxlen.
        self._expire_item()
        self._data[value] = time.time()

    def clear(self):
        """Remove all members"""
        self._data.clear()

    def pop_value(self, value):
        """Remove membership by finding value."""
        self._data.pop(value, None)

    def _expire_item(self):
        """Hunt down and remove an expired item."""
        while 1:
            if self.maxlen and len(self) >= self.maxlen:
                value, when = self.first
                if not self.expires or time.time() > when + self.expires:
                    try:
                        self.pop_value(value)
                    except TypeError:  # pragma: no cover
                        continue
            break

    def __contains__(self, value):
        return value in self._data

    def update(self, other):
        if isinstance(other, self.__class__):
            self._data.update(other._data)
        else:
            self._data.update(other)

    def as_dict(self):
        return self._data

    def __iter__(self):
        return iter(self._data.keys())

    def __len__(self):
        return len(self._data.keys())

    def __repr__(self):
        return "LimitedSet([%s])" % (repr(self._data.keys()))

    @property
    def chronologically(self):
        # Members sorted by insertion time, oldest first.
        # (Python 2 only: tuple-parameter lambda.)
        return sorted(self._data.items(), key=lambda (value, when): when)

    @property
    def first(self):
        """Get the oldest member."""
        return self.chronologically[0]


class LRUCache(UserDict):
    """LRU Cache implementation using a doubly linked list to track access.

    :keyword limit: The maximum number of keys to keep in the cache.
        When a new key is inserted and the limit has been exceeded,
        the *Least Recently Used* key will be discarded from the
        cache.

    """

    def __init__(self, limit=None):
        self.limit = limit
        self.mutex = RLock()
        # OrderedDict insertion order doubles as the recency order:
        # the first key is the least recently used.
        self.data = OrderedDict()

    def __getitem__(self, key):
        with self.mutex:
            # pop + reassign moves the key to the end, marking it
            # most recently used.
            value = self[key] = self.data.pop(key)
        return value

    def keys(self):
        # userdict.keys in py3k calls __getitem__
        return self.data.keys()

    def values(self):
        return list(self._iterate_values())

    def items(self):
        return list(self._iterate_items())

    def __setitem__(self, key, value):
        # remove least recently used key.
        with self.mutex:
            if self.limit and len(self.data) >= self.limit:
                self.data.pop(iter(self.data).next())
            self.data[key] = value

    def __iter__(self):
        return self.data.iterkeys()

    def _iterate_items(self):
        # Tolerate concurrent eviction of keys while iterating.
        for k in self.data:
            try:
                yield (k, self.data[k])
            except KeyError:
                pass
    iteritems = _iterate_items

    def _iterate_values(self):
        for k in self.data:
            try:
                yield self.data[k]
            except KeyError:
                pass
    itervalues = _iterate_values


class TokenBucket(object):
    """Token Bucket Algorithm.

    See http://en.wikipedia.org/wiki/Token_Bucket
    Most of this code was stolen from an entry in the ASPN Python Cookbook:
    http://code.activestate.com/recipes/511490/

    .. admonition:: Thread safety

        This implementation may not be thread safe.

    """

    #: The rate in tokens/second that the bucket will be refilled
    fill_rate = None

    #: Maximum number of tokens in the bucket.
    capacity = 1

    #: Timestamp of the last time a token was taken out of the bucket.
    timestamp = None

    def __init__(self, fill_rate, capacity=1):
        self.capacity = float(capacity)
        self._tokens = capacity
        self.fill_rate = float(fill_rate)
        self.timestamp = time.time()

    def can_consume(self, tokens=1):
        """Returns :const:`True` if `tokens` number of tokens can be consumed
        from the bucket."""
        if tokens <= self._get_tokens():
            self._tokens -= tokens
            return True
        return False

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds when a new token should be
        available.

        .. admonition:: Warning

            This consumes a token from the bucket.

        """
        _tokens = self._get_tokens()
        tokens = max(tokens, _tokens)
        return (tokens - _tokens) / self.fill_rate

    def _get_tokens(self):
        # Lazily refill the bucket based on time elapsed since the last
        # refill, capped at capacity.
        if self._tokens < self.capacity:
            now = time.time()
            delta = self.fill_rate * (now - self.timestamp)
            self._tokens = min(self.capacity, self._tokens + delta)
            self.timestamp = now
        return self._tokens
[end of celery/datastructures.py]
[start of celery/db/models.py]
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from datetime import datetime

import sqlalchemy as sa

from ..
import states

from .session import ResultModelBase

# See docstring of a805d4bd for an explanation for this workaround ;)
if sa.__version__.startswith('0.5'):
    from .dfd042c7 import PickleType
else:
    from .a805d4bd import PickleType  # noqa


class Task(ResultModelBase):
    """Task result/status."""
    __tablename__ = "celery_taskmeta"
    __table_args__ = {"sqlite_autoincrement": True}

    id = sa.Column(sa.Integer, sa.Sequence("task_id_sequence"),
                   primary_key=True,
                   autoincrement=True)
    task_id = sa.Column(sa.String(255), unique=True)
    status = sa.Column(sa.String(50), default=states.PENDING)
    # Result is pickled; date_done auto-updates on every state change.
    result = sa.Column(PickleType, nullable=True)
    date_done = sa.Column(sa.DateTime, default=datetime.now,
                          onupdate=datetime.now, nullable=True)
    traceback = sa.Column(sa.Text, nullable=True)

    def __init__(self, task_id):
        self.task_id = task_id

    def to_dict(self):
        # Plain-dict view used when serializing results for clients.
        return {"task_id": self.task_id,
                "status": self.status,
                "result": self.result,
                "traceback": self.traceback,
                "date_done": self.date_done}

    def __repr__(self):
        return "<Task %s state: %s>" % (self.task_id, self.status)


class TaskSet(ResultModelBase):
    """TaskSet result"""
    __tablename__ = "celery_tasksetmeta"
    __table_args__ = {"sqlite_autoincrement": True}

    id = sa.Column(sa.Integer, sa.Sequence("taskset_id_sequence"),
                   autoincrement=True, primary_key=True)
    taskset_id = sa.Column(sa.String(255), unique=True)
    result = sa.Column(sa.PickleType, nullable=True)
    date_done = sa.Column(sa.DateTime, default=datetime.now,
                          nullable=True)

    def __init__(self, taskset_id, result):
        self.taskset_id = taskset_id
        self.result = result

    def to_dict(self):
        return {"taskset_id": self.taskset_id,
                "result": self.result,
                "date_done": self.date_done}

    def __repr__(self):
        return u"<TaskSet: %s>" % (self.taskset_id, )
[end of celery/db/models.py]
[start of celery/events/__init__.py]
# -*-
coding: utf-8 -*-
"""
celery.events
~~~~~~~~~~~~~

Events are messages sent for actions happening
in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT`
is enabled), used for monitoring purposes.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import
from __future__ import with_statement

import time
import socket
import threading

from collections import deque
from contextlib import contextmanager
from itertools import count

from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer

from ..app import app_or_default
from ..utils import uuid

# Topic exchange shared by all event publishers and consumers;
# routing keys are event types with "-" replaced by ".".
event_exchange = Exchange("celeryev", type="topic")


def Event(type, _fields=None, **fields):
    """Create an event.

    An event is a dictionary, the only required field is ``type``.

    """
    event = dict(_fields or {}, type=type, **fields)
    # Stamp events that do not already carry a timestamp.
    if "timestamp" not in event:
        event["timestamp"] = time.time()
    return event


class EventDispatcher(object):
    """Send events as messages.

    :param connection: Connection to the broker.

    :keyword hostname: Hostname to identify ourselves as,
        by default uses the hostname returned by :func:`socket.gethostname`.

    :keyword enabled: Set to :const:`False` to not actually publish any events,
        making :meth:`send` a noop operation.

    :keyword channel: Can be used instead of `connection` to specify
        an exact channel to use when sending events.

    :keyword buffer_while_offline: If enabled events will be buffered
        while the connection is down. :meth:`flush` must be called
        as soon as the connection is re-established.

    You need to :meth:`close` this after use.

    """

    def __init__(self, connection=None, hostname=None, enabled=True,
            channel=None, buffer_while_offline=True, app=None,
            serializer=None):
        self.app = app_or_default(app)
        self.connection = connection
        self.channel = channel
        self.hostname = hostname or socket.gethostname()
        self.buffer_while_offline = buffer_while_offline
        self.mutex = threading.Lock()
        self.publisher = None
        self._outbound_buffer = deque()   # events queued while offline
        self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER

        self.enabled = enabled
        if self.enabled:
            self.enable()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def enable(self):
        # Prefer an explicitly provided channel; otherwise open one
        # on the connection (closed again in close()).
        self.publisher = Producer(self.channel or self.connection.channel(),
                                  exchange=event_exchange,
                                  serializer=self.serializer)
        self.enabled = True

    def disable(self):
        if self.enabled:
            self.enabled = False
            self.close()

    def send(self, type, **fields):
        """Send event.

        :param type: Kind of event.
        :keyword \*\*fields: Event arguments.

        """
        if self.enabled:
            with self.mutex:
                event = Event(type, hostname=self.hostname,
                              clock=self.app.clock.forward(), **fields)
                try:
                    self.publisher.publish(event,
                                           routing_key=type.replace("-", "."))
                except Exception, exc:
                    # Publish failed (e.g. connection down): optionally
                    # buffer the event for a later flush().
                    if not self.buffer_while_offline:
                        raise
                    self._outbound_buffer.append((type, fields, exc))

    def flush(self):
        # Re-send everything buffered while the connection was down.
        while self._outbound_buffer:
            try:
                type, fields, _ = self._outbound_buffer.popleft()
            except IndexError:
                return
            self.send(type, **fields)

    def copy_buffer(self, other):
        self._outbound_buffer = other._outbound_buffer

    def close(self):
        """Close the event dispatcher."""
        self.mutex.locked() and self.mutex.release()
        if self.publisher is not None:
            if not self.channel:  # close auto channel.
                self.publisher.channel.close()
            self.publisher = None


class EventReceiver(object):
    """Capture events.

    :param connection: Connection to the broker.
    :keyword handlers: Event handlers.

    :attr:`handlers` is a dict of event types and their handlers,
    the special handler `"*"` captures all events that doesn't have a
    handler.

    """
    handlers = {}

    def __init__(self, connection, handlers=None, routing_key="#",
            node_id=None, app=None):
        self.app = app_or_default(app)
        self.connection = connection
        if handlers is not None:
            self.handlers = handlers
        self.routing_key = routing_key
        self.node_id = node_id or uuid()
        # Per-node, auto-deleted queue bound to the shared event exchange.
        self.queue = Queue("%s.%s" % ("celeryev", self.node_id),
                           exchange=event_exchange,
                           routing_key=self.routing_key,
                           auto_delete=True,
                           durable=False)

    def process(self, type, event):
        """Process the received event by dispatching it to the appropriate
        handler."""
        handler = self.handlers.get(type) or self.handlers.get("*")
        handler and handler(event)

    @contextmanager
    def consumer(self):
        """Create event consumer.

        .. warning::

            This creates a new channel that needs to be closed
            by calling `consumer.channel.close()`.

        """
        consumer = Consumer(self.connection.channel(),
                            queues=[self.queue], no_ack=True)
        consumer.register_callback(self._receive)
        with consumer:
            yield consumer
        consumer.channel.close()

    def itercapture(self, limit=None, timeout=None, wakeup=True):
        # Generator: first next() starts consuming (and optionally pings
        # workers for a heartbeat); second next() drains events.
        with self.consumer() as consumer:
            if wakeup:
                self.wakeup_workers(channel=consumer.channel)

            yield consumer

            self.drain_events(limit=limit, timeout=timeout)

    def capture(self, limit=None, timeout=None, wakeup=True):
        """Open up a consumer capturing events.

        This has to run in the main process, and it will never
        stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`.

        """
        list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup))

    def wakeup_workers(self, channel=None):
        # Ask all workers to emit a heartbeat so they show up immediately.
        self.app.control.broadcast("heartbeat",
                                   connection=self.connection,
                                   channel=channel)

    def drain_events(self, limit=None, timeout=None):
        for iteration in count(0):
            if limit and iteration >= limit:
                break
            try:
                self.connection.drain_events(timeout=timeout)
            except socket.timeout:
                # Only propagate timeouts the caller asked for.
                if timeout:
                    raise
            except socket.error:
                pass

    def _receive(self, body, message):
        type = body.pop("type").lower()
        # Keep our logical clock in sync with the sender's.
        clock = body.get("clock")
        if clock:
            self.app.clock.adjust(clock)
        self.process(type, Event(type, body))


class Events(object):
    # App-bound factory for event receivers, dispatchers and state.

    def __init__(self, app=None):
        self.app = app

    def Receiver(self, connection, handlers=None, routing_key="#",
            node_id=None):
        return EventReceiver(connection,
                             handlers=handlers,
                             routing_key=routing_key,
                             node_id=node_id,
                             app=self.app)

    def Dispatcher(self, connection=None, hostname=None, enabled=True,
            channel=None, buffer_while_offline=True):
        return EventDispatcher(connection,
                               hostname=hostname,
                               enabled=enabled,
                               channel=channel,
                               app=self.app)

    def State(self):
        # Imported lazily to avoid a circular import at module load time.
        from .state import State as _State
        return _State()

    @contextmanager
    def default_dispatcher(self, hostname=None, enabled=True,
            buffer_while_offline=False):
        # Dispatcher on a pooled publisher connection/channel.
        with self.app.amqp.publisher_pool.acquire(block=True) as pub:
            with self.Dispatcher(pub.connection, hostname, enabled,
                                 pub.channel, buffer_while_offline) as d:
                yield d
[end of celery/events/__init__.py]
[start of celery/events/cursesmon.py]
# -*- coding: utf-8 -*-
"""
celery.events.cursesmon
~~~~~~~~~~~~~~~~~~~~~~~

Graphical monitor of Celery events using curses.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import curses
import sys
import threading
import time

from datetime import datetime
from itertools import count
from textwrap import wrap
from math import ceil

from .. import __version__
from .. import states
from ..app import app_or_default
from ..utils import abbr, abbrtask

# Layout constants (columns) used by format_row/draw below.
BORDER_SPACING = 4
LEFT_BORDER_OFFSET = 3
UUID_WIDTH = 36
STATE_WIDTH = 8
TIMESTAMP_WIDTH = 8
MIN_WORKER_WIDTH = 15
MIN_TASK_WIDTH = 16


class CursesMonitor(object):
    keymap = {}
    win = None
    # NOTE(review): `screen_width` is also defined as a read-only property
    # further down — confirm whether this class attribute is ever reachable.
    screen_width = None
    screen_delay = 10
    selected_task = None
    selected_position = 0
    selected_str = "Selected: "
    foreground = curses.COLOR_BLACK
    background = curses.COLOR_WHITE
    online_str = "Workers online: "
    help_title = "Keys: "
    help = ("j:up k:down i:info t:traceback r:result c:revoke ^c: quit")
    greet = "celeryev %s" % __version__
    info_str = "Info: "

    def __init__(self, state, keymap=None, app=None):
        self.app = app_or_default(app)
        self.keymap = keymap or self.keymap
        self.state = state
        # User-supplied keymap entries override the defaults.
        default_keymap = {"J": self.move_selection_down,
                          "K": self.move_selection_up,
                          "C": self.revoke_selection,
                          "T": self.selection_traceback,
                          "R": self.selection_result,
                          "I": self.selection_info,
                          "L": self.selection_rate_limit}
        self.keymap = dict(default_keymap, **self.keymap)

    def format_row(self, uuid, task, worker, timestamp, state):
        # Fit one task row into the display width, shrinking the uuid
        # column first and splitting the remainder between task/worker.
        mx = self.display_width

        # include spacing
        detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH
        uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH

        if uuid_space < UUID_WIDTH:
            uuid_width = uuid_space
        else:
            uuid_width = UUID_WIDTH

        detail_width = detail_width - uuid_width - 1
        task_width = int(ceil(detail_width / 2.0))
        worker_width = detail_width - task_width - 1

        uuid = abbr(uuid, uuid_width).ljust(uuid_width)
        worker = 
abbr(worker, worker_width).ljust(worker_width)
        task = abbrtask(task, task_width).ljust(task_width)
        state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
        timestamp = timestamp.ljust(TIMESTAMP_WIDTH)

        row = "%s %s %s %s %s " % (uuid, worker, task, timestamp, state)
        # NOTE(review): screen_width is a read-only property below, so this
        # assignment would raise AttributeError if ever reached — confirm.
        if self.screen_width is None:
            self.screen_width = len(row[:mx])
        return row[:mx]

    @property
    def screen_width(self):
        _, mx = self.win.getmaxyx()
        return mx

    @property
    def screen_height(self):
        my, _ = self.win.getmaxyx()
        return my

    @property
    def display_width(self):
        _, mx = self.win.getmaxyx()
        return mx - BORDER_SPACING

    @property
    def display_height(self):
        # Leave room for the footer (selection/workers/info/help lines).
        my, _ = self.win.getmaxyx()
        return my - 10

    @property
    def limit(self):
        return self.display_height

    def find_position(self):
        # Index of the currently selected task in the task list, or 0.
        if not self.tasks:
            return 0
        for i, e in enumerate(self.tasks):
            if self.selected_task == e[0]:
                return i
        return 0

    def move_selection_up(self):
        self.move_selection(-1)

    def move_selection_down(self):
        self.move_selection(1)

    def move_selection(self, direction=1):
        if not self.tasks:
            return
        pos = self.find_position()
        try:
            self.selected_task = self.tasks[pos + direction][0]
        except IndexError:
            # Wrap around to the top of the list.
            self.selected_task = self.tasks[0][0]

    # Map special curses keys onto the single-letter keymap entries.
    keyalias = {curses.KEY_DOWN: "J",
                curses.KEY_UP: "K",
                curses.KEY_ENTER: "I"}

    def handle_keypress(self):
        try:
            key = self.win.getkey().upper()
        except:
            # No key pending (window is in nodelay mode).
            return
        key = self.keyalias.get(key) or key
        handler = self.keymap.get(key)
        if handler is not None:
            handler()

    def alert(self, callback, title=None):
        # Full-screen modal: render via `callback`, wait for any key.
        self.win.erase()
        my, mx = self.win.getmaxyx()
        y = blank_line = count(2).next
        if title:
            self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE)
            blank_line()
        callback(my, mx, y())
        self.win.addstr(my - 1, 0, "Press any key to 
continue...",
                        curses.A_BOLD)
        self.win.refresh()
        while 1:
            try:
                return self.win.getkey().upper()
            except:
                pass

    def selection_rate_limit(self):
        # Prompt for a new rate limit and broadcast it for the selected
        # task's type.
        if not self.selected_task:
            return curses.beep()
        task = self.state.tasks[self.selected_task]
        if not task.name:
            return curses.beep()

        my, mx = self.win.getmaxyx()
        r = "New rate limit: "
        self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
        self.win.addstr(my - 2, len(r) + 3, " " * (mx - len(r)))
        rlimit = self.readline(my - 2, 3 + len(r))

        if rlimit:
            reply = self.app.control.rate_limit(task.name,
                                                rlimit.strip(), reply=True)
            self.alert_remote_control_reply(reply)

    def alert_remote_control_reply(self, reply):
        # Show per-worker ok/error responses from a broadcast command.

        def callback(my, mx, xs):
            y = count(xs).next
            if not reply:
                self.win.addstr(y(), 3, "No replies received in 1s deadline.",
                        curses.A_BOLD + curses.color_pair(2))
                return

            for subreply in reply:
                curline = y()

                host, response = subreply.items()[0]
                host = "%s: " % host
                self.win.addstr(curline, 3, host, curses.A_BOLD)
                attr = curses.A_NORMAL
                text = ""
                if "error" in response:
                    text = response["error"]
                    attr |= curses.color_pair(2)
                elif "ok" in response:
                    text = response["ok"]
                    attr |= curses.color_pair(3)
                self.win.addstr(curline, 3 + len(host), text, attr)

        return self.alert(callback, "Remote Control Command Replies")

    def readline(self, x, y):
        # Minimal line editor: Enter accepts, Escape clears and aborts.
        buffer = str()
        curses.echo()
        try:
            i = 0
            while True:
                ch = self.win.getch(x, y + i)
                if ch != -1:
                    if ch in (10, curses.KEY_ENTER):  # enter
                        break
                    if ch in (27, ):
                        buffer = str()
                        break
                    buffer += chr(ch)
                    i += 1
        finally:
            curses.noecho()
        return buffer

    def revoke_selection(self):
        if not self.selected_task:
            return curses.beep()
        reply = self.app.control.revoke(self.selected_task, reply=True)
        self.alert_remote_control_reply(reply)

    def selection_info(self):
        # Modal with full details (args/kwargs/extra state) of the
        # selected task.
        if not self.selected_task:
            return

        def alert_callback(mx, my, xs):
            my, mx = self.win.getmaxyx()
            y = count(xs).next
            task = self.state.tasks[self.selected_task]
            info = task.info(extra=["state"])
            # args/kwargs always come first, in this order.
            infoitems = [("args", info.pop("args", None)),
                         ("kwargs", info.pop("kwargs", None))] + info.items()
            for key, value in infoitems:
                if key is None:
                    continue
                value = str(value)
                curline = y()
                keys = key + ": "
                self.win.addstr(curline, 3, keys, curses.A_BOLD)
                wrapped = wrap(value, mx - 2)
                if len(wrapped) == 1:
                    self.win.addstr(curline, len(keys) + 3,
                            abbr(wrapped[0],
                                 self.screen_width - (len(keys) + 3)))
                else:
                    for subline in wrapped:
                        nexty = y()
                        if nexty >= my - 1:
                            # Last usable row: truncate the rest.
                            subline = " " * 4 + "[...]"
                        elif nexty >= my:
                            break
                        self.win.addstr(nexty, 3,
                                abbr(" " * 4 + subline, self.screen_width - 4),
                                curses.A_NORMAL)

        return self.alert(alert_callback,
                "Task details for %s" % self.selected_task)

    def selection_traceback(self):
        if not self.selected_task:
            return curses.beep()
        task = self.state.tasks[self.selected_task]
        # Only tasks that failed have a traceback to show.
        if task.state not in states.EXCEPTION_STATES:
            return curses.beep()

        def alert_callback(my, mx, xs):
            y = count(xs).next
            for line in task.traceback.split("\n"):
                self.win.addstr(y(), 3, line)

        return self.alert(alert_callback,
                "Task Exception Traceback for %s" % self.selected_task)

    def selection_result(self):
        if not self.selected_task:
            return

        def alert_callback(my, mx, xs):
            y = count(xs).next
            task = self.state.tasks[self.selected_task]
            result = getattr(task, "result", None) or getattr(task,
                    "exception", None)
            for line in wrap(result, mx - 2):
                self.win.addstr(y(), 3, line)

        return self.alert(alert_callback,
                "Task Result for %s" % self.selected_task)

    def 
display_task_row(self, lineno, task):
        # Render one task line, highlighting the selection and
        # colorizing the state column.
        state_color = self.state_colors.get(task.state)
        attr = curses.A_NORMAL
        if task.uuid == self.selected_task:
            attr = curses.A_STANDOUT
        timestamp = datetime.fromtimestamp(
                        task.timestamp or time.time())
        timef = timestamp.strftime("%H:%M:%S")
        line = self.format_row(task.uuid, task.name,
                               task.worker.hostname,
                               timef, task.state)
        self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr)

        if state_color:
            self.win.addstr(lineno,
                    len(line) - STATE_WIDTH + BORDER_SPACING - 1,
                    task.state, state_color | attr)

    def draw(self):
        # Redraw the whole screen: header, task table, then footer
        # (selection info, workers, counters, help).
        win = self.win
        self.handle_keypress()
        x = LEFT_BORDER_OFFSET
        y = blank_line = count(2).next
        my, mx = win.getmaxyx()
        win.erase()
        win.bkgd(" ", curses.color_pair(1))
        win.border()
        win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
        blank_line()
        win.addstr(y(), x, self.format_row("UUID", "TASK",
                                           "WORKER", "TIME", "STATE"),
                curses.A_BOLD | curses.A_UNDERLINE)
        tasks = self.tasks
        if tasks:
            for row, (uuid, task) in enumerate(tasks):
                if row > self.display_height:
                    break

                if task.uuid:
                    lineno = y()
                    self.display_task_row(lineno, task)

        # -- Footer
        blank_line()
        win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4)

        # Selected Task Info
        if self.selected_task:
            win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
            info = "Missing extended info"
            detail = ""
            try:
                selection = self.state.tasks[self.selected_task]
            except KeyError:
                pass
            else:
                info = selection.info(["args", "kwargs",
                                       "result", "runtime", "eta"])
                if "runtime" in info:
                    info["runtime"] = "%.2fs" % info["runtime"]
                if "result" in info:
                    info["result"] = abbr(info["result"], 16)
                info = " ".join("%s=%s" % (key, value)
                                for key, value in info.items())
                detail = "... 
-> key i"
            infowin = abbr(info,
                           self.screen_width - len(self.selected_str) - 2,
                           detail)
            win.addstr(my - 5, x + len(self.selected_str), infowin)
            # Make ellipsis bold
            if detail in infowin:
                detailpos = len(infowin) - len(detail)
                win.addstr(my - 5, x + len(self.selected_str) + detailpos,
                        detail, curses.A_BOLD)
        else:
            win.addstr(my - 5, x, "No task selected", curses.A_NORMAL)

        # Workers
        if self.workers:
            win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
            win.addstr(my - 4, x + len(self.online_str),
                    ", ".join(sorted(self.workers)), curses.A_NORMAL)
        else:
            win.addstr(my - 4, x, "No workers discovered.")

        # Info
        win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
        win.addstr(my - 3, x + len(self.info_str),
                "events:%s tasks:%s workers:%s/%s" % (
                    self.state.event_count, self.state.task_count,
                    len([w for w in self.state.workers.values()
                            if w.alive]),
                    len(self.state.workers)),
                curses.A_DIM)

        # Help
        self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD)
        self.safe_add_str(my - 2, x + len(self.help_title), self.help,
                          curses.A_DIM)
        win.refresh()

    def safe_add_str(self, y, x, string, *args, **kwargs):
        # Clip the string to the screen to avoid curses errors when
        # writing past the right edge.
        if x + len(string) > self.screen_width:
            string = string[:self.screen_width - x]
        self.win.addstr(y, x, string, *args, **kwargs)

    def init_screen(self):
        # Set up curses and the color pairs used for task states.
        self.win = curses.initscr()
        self.win.nodelay(True)
        self.win.keypad(True)
        curses.start_color()
        curses.init_pair(1, self.foreground, self.background)
        # exception states
        curses.init_pair(2, curses.COLOR_RED, self.background)
        # successful state
        curses.init_pair(3, curses.COLOR_GREEN, self.background)
        # revoked state
        curses.init_pair(4, curses.COLOR_MAGENTA, self.background)
        # greeting
        curses.init_pair(5, curses.COLOR_BLUE, self.background)
        # started state
        curses.init_pair(6, curses.COLOR_YELLOW, self.foreground)

        self.state_colors = {states.SUCCESS: curses.color_pair(3),
                             states.REVOKED: curses.color_pair(4),
                             states.STARTED: curses.color_pair(6)}
        for state in states.EXCEPTION_STATES:
            self.state_colors[state] = curses.color_pair(2)

        curses.cbreak()

    def resetscreen(self):
        # Undo init_screen() so the terminal is usable again.
        curses.nocbreak()
        self.win.keypad(False)
        curses.echo()
        curses.endwin()

    def nap(self):
        curses.napms(self.screen_delay)

    @property
    def tasks(self):
        # Newest tasks first, limited to what fits on screen.
        return self.state.tasks_by_timestamp()[:self.limit]

    @property
    def workers(self):
        return [hostname
                for hostname, w in self.state.workers.items()
                if w.alive]


class DisplayThread(threading.Thread):
    # Redraws the monitor in a loop until `shutdown` is set.

    def __init__(self, display):
        self.display = display
        self.shutdown = False
        threading.Thread.__init__(self)

    def run(self):
        while not self.shutdown:
            self.display.draw()
            self.display.nap()


def evtop(app=None):
    # Entry point: capture events in the main thread while a background
    # thread refreshes the curses display.
    sys.stderr.write("-> evtop: starting capture...\n")
    app = app_or_default(app)
    state = app.events.State()
    conn = app.broker_connection()
    recv = app.events.Receiver(conn, handlers={"*": state.event})
    capture = recv.itercapture()
    capture.next()
    display = CursesMonitor(state, app=app)
    display.init_screen()
    refresher = DisplayThread(display)
    refresher.start()
    try:
        capture.next()
    except Exception:
        # Restore the terminal before propagating the error.
        refresher.shutdown = True
        refresher.join()
        display.resetscreen()
        raise
    except (KeyboardInterrupt, SystemExit):
        conn and conn.close()
        refresher.shutdown = True
        refresher.join()
        display.resetscreen()


if __name__ == "__main__":
    evtop()
[end of celery/events/cursesmon.py]
[start of celery/events/dumper.py]
# -*- coding: utf-8 -*-
"""
celery.events.dumper
~~~~~~~~~~~~~~~~~~~~

This is a simple program that dumps events to the console
as they happen. Think of it like a `tcpdump` for Celery events.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import sys

from datetime import datetime

from ..app import app_or_default
from ..datastructures import LRUCache

# Cache of task reprs keyed by uuid, so events that only carry the uuid
# (e.g. task-succeeded) can still be annotated with name/args.
TASK_NAMES = LRUCache(limit=0xFFF)

# Friendlier display names for the worker event types.
HUMAN_TYPES = {"worker-offline": "shutdown",
               "worker-online": "started",
               "worker-heartbeat": "heartbeat"}


def humanize_type(type):
    # "worker-online" -> "started"; unknown types fall back to
    # lowercasing and replacing dashes ("task-sent" -> "task sent").
    try:
        return HUMAN_TYPES[type.lower()]
    except KeyError:
        return type.lower().replace("-", " ")


class Dumper(object):
    """Prints incoming events to stdout, one line per event."""

    def on_event(self, event):
        timestamp = datetime.fromtimestamp(event.pop("timestamp"))
        type = event.pop("type").lower()
        hostname = event.pop("hostname")
        if type.startswith("task-"):
            uuid = event.pop("uuid")
            if type in ("task-received", "task-sent"):
                # First sighting of this task: remember its repr for
                # subsequent events carrying only the uuid.
                task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
                        event.pop("name"), uuid,
                        event.pop("args"),
                        event.pop("kwargs"))
            else:
                task = TASK_NAMES.get(uuid, "")
            return self.format_task_event(hostname, timestamp,
                                          type, task, event)
        # Non-task (worker) event: dump remaining fields sorted by key.
        fields = ", ".join("%s=%s" % (key, event[key])
                        for key in sorted(event.keys()))
        sep = fields and ":" or ""
        print("%s [%s] %s%s %s" % (hostname, timestamp,
                                   humanize_type(type), sep, fields))

    def format_task_event(self, hostname, timestamp, type, task, event):
        fields = ", ".join("%s=%s" % (key, event[key])
                        for key in sorted(event.keys()))
        sep = fields and ":" or ""
        print("%s [%s] %s%s %s %s" % (hostname, timestamp,
                humanize_type(type), sep, task, fields))


def evdump(app=None):
    """Attach to the broker and dump every event until interrupted."""
    sys.stderr.write("-> evdump: starting capture...\n")
    app = app_or_default(app)
    dumper = Dumper()
    conn = app.broker_connection()
    recv = app.events.Receiver(conn, handlers={"*": dumper.on_event})
    try:
        recv.capture()
    except (KeyboardInterrupt, SystemExit):
        conn and conn.close()

if __name__ == "__main__":
    evdump()
[end of celery/events/dumper.py]
[start of celery/events/snapshot.py]
# -*- coding: utf-8 -*-
"""
celery.events.snapshot
~~~~~~~~~~~~~~~~~~~~~~

Consuming the events as a stream is not always suitable
so this module implements a system to take snapshots of the
state of a cluster at regular intervals. There is a full
implementation of this writing the snapshots to a database
in :mod:`djcelery.snapshots` in the `django-celery` distribution.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import atexit

from .. import platforms
from ..app import app_or_default
from ..datastructures import TokenBucket
from ..utils import timer2, instantiate, LOG_LEVELS
from ..utils.dispatch import Signal
from ..utils.timeutils import rate


class Polaroid(object):
    """Periodically takes a "snapshot" of the cluster state.

    :param state: Events state object to snapshot.
    :keyword freq: Seconds between snapshots.
    :keyword maxrate: Optional rate limit (e.g. "100/m") on shutter firing.
    :keyword cleanup_freq: Seconds between cleanup runs.

    """
    timer = timer2
    shutter_signal = Signal(providing_args=("state", ))
    cleanup_signal = Signal()
    clear_after = False

    # Timer entry references for capture/cleanup; None until install().
    _tref = None
    _ctref = None

    def __init__(self, state, freq=1.0, maxrate=None,
            cleanup_freq=3600.0, logger=None, timer=None, app=None):
        self.app = app_or_default(app)
        self.state = state
        self.freq = freq
        self.cleanup_freq = cleanup_freq
        self.timer = timer or self.timer
        self.logger = logger or \
                self.app.log.get_default_logger(name="celery.cam")
        # maxrate is falsy -> no rate limiting at all.
        self.maxrate = maxrate and TokenBucket(rate(maxrate))

    def install(self):
        # Schedule periodic capture and cleanup on the timer
        # (intervals are given in milliseconds).
        self._tref = self.timer.apply_interval(self.freq * 1000.0,
                                               self.capture)
        self._ctref = self.timer.apply_interval(self.cleanup_freq * 1000.0,
                                                self.cleanup)

    def on_shutter(self, state):
        # Hook for subclasses: called with the frozen state on each shot.
        pass

    def on_cleanup(self):
        # Hook for subclasses: called on each cleanup run.
        pass

    def cleanup(self):
        self.logger.debug("Cleanup: Running...")
        self.cleanup_signal.send(None)
        self.on_cleanup()

    def shutter(self):
        # Respect the optional rate limit before firing.
        if self.maxrate is None or self.maxrate.can_consume():
            self.logger.debug("Shutter: %s", self.state)
            self.shutter_signal.send(self.state)
            self.on_shutter(self.state)

    def capture(self):
        # Freeze the state while the shutter callbacks run so they see
        # a consistent view.
        self.state.freeze_while(self.shutter, clear_after=self.clear_after)

    def cancel(self):
        if self._tref:
            self._tref()  # flush all received events.
            self._tref.cancel()
        if self._ctref:
            self._ctref.cancel()

    def __enter__(self):
        self.install()
        return self

    def __exit__(self, *exc_info):
        self.cancel()


def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
        logfile=None, pidfile=None, timer=None, app=None):
    """Run the snapshot *camera* until interrupted.

    :param camera: Name of the camera class to instantiate.
    :keyword freq: Snapshot frequency in seconds.

    """
    app = app_or_default(app)

    if pidfile:
        pidlock = platforms.create_pidlock(pidfile).acquire()
        atexit.register(pidlock.release)

    if not isinstance(loglevel, int):
        loglevel = LOG_LEVELS[loglevel.upper()]
    logger = app.log.setup_logger(loglevel=loglevel,
                                  logfile=logfile,
                                  name="celery.evcam")

    logger.info(
        "-> evcam: Taking snapshots with %s (every %s secs.)\n" % (
            camera, freq))
    state = app.events.State()
    cam = instantiate(camera, state, app=app,
                      freq=freq, maxrate=maxrate, logger=logger,
                      timer=timer)
    cam.install()
    conn = app.broker_connection()
    recv = app.events.Receiver(conn, handlers={"*": state.event})
    try:
        try:
            recv.capture(limit=None)
        except KeyboardInterrupt:
            raise SystemExit
    finally:
        # Always stop the camera and close the connection, even on exit.
        cam.cancel()
        conn.close()
[end of celery/events/snapshot.py]
[start of celery/exceptions.py]
# -*- coding: utf-8 -*-
"""
celery.exceptions
~~~~~~~~~~~~~~~~~

This module contains Celery-specific exceptions.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
10 11 """ 12 from __future__ import absolute_import 13 14 UNREGISTERED_FMT = """\ 15 Task of kind %s is not registered, please make sure it's imported.\ 16 """ 17 18 19 class SystemTerminate(SystemExit): 20 """Signals that the worker should terminate.""" 21 22 23 class QueueNotFound(KeyError): 24 """Task routed to a queue not in CELERY_QUEUES.""" 25 26 27 class TimeLimitExceeded(Exception): 28 """The time limit has been exceeded and the job has been terminated.""" 29 30 31 class SoftTimeLimitExceeded(Exception): 32 """The soft time limit has been exceeded. This exception is raised 33 to give the task a chance to clean up.""" 34 35 36 class WorkerLostError(Exception): 37 """The worker processing a job has exited prematurely.""" 38 39 40 class ImproperlyConfigured(Exception): 41 """Celery is somehow improperly configured.""" 42 43 44 class NotRegistered(KeyError): 45 """The task is not registered.""" 46 47 def __repr__(self): 48 return UNREGISTERED_FMT % str(self) 49 50 51 class AlreadyRegistered(Exception): 52 """The task is already registered.""" 53 54 55 class TimeoutError(Exception): 56 """The operation timed out.""" 57 58 59 class MaxRetriesExceededError(Exception): 60 """The tasks max restart limit has been exceeded.""" 61 62 63 class RetryTaskError(Exception): 64 """The task is to be retried later.""" 65 66 def __init__(self, message, exc, *args, **kwargs): 67 self.exc = exc 68 Exception.__init__(self, message, exc, *args, **kwargs) 69 70 71 class TaskRevokedError(Exception): 72 """The task has been revoked, so no result available.""" 73 74 75 class NotConfigured(UserWarning): 76 """Celery has not been configured, as no config module has been found.""" 77 78 79 class CPendingDeprecationWarning(PendingDeprecationWarning): 80 pass 81 82 83 class CDeprecationWarning(DeprecationWarning): 84 pass 85 [end of celery/exceptions.py] [start of celery/execute/trace.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.execute.trace 4 ~~~~~~~~~~~~~~~~~~~~ 5 6 This module defines 
how the task execution is traced:
errors are recorded, handlers are applied and so on.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import sys
import traceback

from .. import states, signals
from ..datastructures import ExceptionInfo
from ..exceptions import RetryTaskError
from ..registry import tasks


class TraceInfo(object):
    """Captures the outcome of a traced call: resulting state, return
    value and exception details (if any)."""

    def __init__(self, status=states.PENDING, retval=None, exc_info=None):
        self.status = status
        self.retval = retval
        self.exc_info = exc_info
        self.exc_type = None
        self.exc_value = None
        self.tb = None
        self.strtb = None
        if self.exc_info:
            self.exc_type, self.exc_value, self.tb = exc_info
            self.strtb = "\n".join(traceback.format_exception(*exc_info))

    @classmethod
    def trace(cls, fun, args, kwargs, propagate=False):
        """Trace the execution of a function, calling the appropriate
        callback if the function raises retry, a failure or returned
        successfully.

        :keyword propagate: If true, errors will propagate to the caller.

        """
        try:
            return cls(states.SUCCESS, retval=fun(*args, **kwargs))
        except RetryTaskError, exc:
            # Task requested a retry; not a failure.
            return cls(states.RETRY, retval=exc, exc_info=sys.exc_info())
        except Exception, exc:
            if propagate:
                raise
            return cls(states.FAILURE, retval=exc, exc_info=sys.exc_info())
        except BaseException, exc:
            # SystemExit/KeyboardInterrupt etc. always propagate.
            raise
        except:  # pragma: no cover
            # For Python2.5 where raising strings are still allowed
            # (but deprecated)
            if propagate:
                raise
            return cls(states.FAILURE, retval=None, exc_info=sys.exc_info())


class TaskTrace(object):
    """Executes a task and dispatches to the handler matching the
    resulting state (success / retry / failure)."""

    def __init__(self, task_name, task_id, args, kwargs, task=None,
            request=None, propagate=None, **_):
        self.task_id = task_id
        self.task_name = task_name
        self.args = args
        self.kwargs = kwargs
        self.task = task or tasks[self.task_name]
        self.request = request or {}
        self.status = states.PENDING
        self.strtb = None
        self.propagate = propagate
        # State -> handler dispatch table used by _trace().
        self._trace_handlers = {states.FAILURE: self.handle_failure,
                                states.RETRY: self.handle_retry,
                                states.SUCCESS: self.handle_success}

    def __call__(self):
        return self.execute()

    def execute(self):
        # Populate the task request context, fire pre/post-run signals
        # around the traced call, and always clear the context after.
        self.task.request.update(self.request, args=self.args,
                                 called_directly=False, kwargs=self.kwargs)
        signals.task_prerun.send(sender=self.task, task_id=self.task_id,
                                 task=self.task, args=self.args,
                                 kwargs=self.kwargs)
        retval = self._trace()

        signals.task_postrun.send(sender=self.task, task_id=self.task_id,
                                  task=self.task, args=self.args,
                                  kwargs=self.kwargs, retval=retval)
        self.task.request.clear()
        return retval

    def _trace(self):
        trace = TraceInfo.trace(self.task, self.args, self.kwargs,
                                propagate=self.propagate)
        self.status = trace.status
        self.strtb = trace.strtb
        handler = self._trace_handlers[trace.status]
        r = handler(trace.retval, trace.exc_type, trace.tb, trace.strtb)
        self.handle_after_return(trace.status, trace.retval,
                                 trace.exc_type, trace.tb, trace.strtb,
                                 einfo=trace.exc_info)
        return r

    def handle_after_return(self, status, retval, type_, tb, strtb,
            einfo=None):
        if status in states.EXCEPTION_STATES:
            einfo = ExceptionInfo(einfo)
        self.task.after_return(status, retval, self.task_id,
                               self.args, self.kwargs, einfo)

    def handle_success(self, retval, *args):
        """Handle successful execution."""
        self.task.on_success(retval, self.task_id, self.args, self.kwargs)
        return retval

    def handle_retry(self, exc, type_, tb, strtb):
        """Handle retry exception."""
        # Create a simpler version of the RetryTaskError that stringifies
        # the original exception instead of including the exception instance.
        # This is for reporting the retry in logs, email etc, while
        # guaranteeing pickleability.
        message, orig_exc = exc.args
        expanded_msg = "%s: %s" % (message, str(orig_exc))
        einfo = ExceptionInfo((type_, type_(expanded_msg, None), tb))
        self.task.on_retry(exc, self.task_id, self.args, self.kwargs, einfo)
        return einfo

    def handle_failure(self, exc, type_, tb, strtb):
        """Handle exception."""
        einfo = ExceptionInfo((type_, exc, tb))
        self.task.on_failure(exc, self.task_id, self.args, self.kwargs, einfo)
        signals.task_failure.send(sender=self.task, task_id=self.task_id,
                                  exception=exc, args=self.args,
                                  kwargs=self.kwargs, traceback=tb,
                                  einfo=einfo)
        return einfo
[end of celery/execute/trace.py]
[start of celery/loaders/__init__.py]
# -*- coding: utf-8 -*-
"""
celery.loaders
~~~~~~~~~~~~~~

Loaders define how configuration is read, what happens
when workers start, when tasks are executed and so on.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

from ..
import current_app
from ..utils import deprecated, get_cls_by_name

# Short names accepted wherever a loader class path is expected.
LOADER_ALIASES = {"app": "celery.loaders.app.AppLoader",
                  "default": "celery.loaders.default.Loader",
                  "django": "djcelery.loaders.DjangoLoader"}


def get_loader_cls(loader):
    """Get loader class by name/alias"""
    return get_cls_by_name(loader, LOADER_ALIASES)


@deprecated(deprecation="2.5", removal="3.0",
            alternative="celery.current_app.loader")
def current_loader():
    # Deprecated: use celery.current_app.loader instead.
    return current_app.loader


@deprecated(deprecation="2.5", removal="3.0",
            alternative="celery.current_app.conf")
def load_settings():
    # Deprecated: use celery.current_app.conf instead.
    return current_app.conf
[end of celery/loaders/__init__.py]
[start of celery/log.py]
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import logging
import threading
import sys
import traceback

try:
    from multiprocessing import current_process
    from multiprocessing import util as mputil
except ImportError:
    current_process = mputil = None  # noqa

from . import current_app
from . import signals
from .local import Proxy
from .utils import LOG_LEVELS, isatty
from .utils.compat import LoggerAdapter, WatchedFileHandler
from .utils.encoding import safe_str, str_t
from .utils.patch import ensure_process_aware_logger
from .utils.term import colored

is_py3k = sys.version_info >= (3, 0)


class ColorFormatter(logging.Formatter):
    #: Loglevel -> Color mapping.
29 COLORS = colored().names 30 colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"], 31 "ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]} 32 33 def __init__(self, msg, use_color=True): 34 logging.Formatter.__init__(self, msg) 35 self.use_color = use_color 36 37 def formatException(self, ei): 38 r = logging.Formatter.formatException(self, ei) 39 if isinstance(r, str) and not is_py3k: 40 return safe_str(r) 41 return r 42 43 def format(self, record): 44 levelname = record.levelname 45 color = self.colors.get(levelname) 46 47 if self.use_color and color: 48 try: 49 record.msg = safe_str(str_t(color(record.msg))) 50 except Exception, exc: 51 record.msg = "<Unrepresentable %r: %r>" % ( 52 type(record.msg), exc) 53 record.exc_info = sys.exc_info() 54 55 if not is_py3k: 56 # Very ugly, but have to make sure processName is supported 57 # by foreign logger instances. 58 # (processName is always supported by Python 2.7) 59 if "processName" not in record.__dict__: 60 process_name = (current_process and 61 current_process()._name or "") 62 record.__dict__["processName"] = process_name 63 return safe_str(logging.Formatter.format(self, record)) 64 65 66 class Logging(object): 67 #: The logging subsystem is only configured once per process. 68 #: setup_logging_subsystem sets this flag, and subsequent calls 69 #: will do nothing. 70 _setup = False 71 72 def __init__(self, app): 73 self.app = app 74 self.loglevel = self.app.conf.CELERYD_LOG_LEVEL 75 self.format = self.app.conf.CELERYD_LOG_FORMAT 76 self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT 77 self.colorize = self.app.conf.CELERYD_LOG_COLOR 78 79 def supports_color(self, logfile=None): 80 if self.app.IS_WINDOWS: 81 # Windows does not support ANSI color codes. 82 return False 83 if self.colorize is None: 84 # Only use color if there is no active log file 85 # and stderr is an actual terminal. 
86 return logfile is None and isatty(sys.stderr) 87 return self.colorize 88 89 def colored(self, logfile=None): 90 return colored(enabled=self.supports_color(logfile)) 91 92 def get_task_logger(self, loglevel=None, name=None): 93 logger = logging.getLogger(name or "celery.task.default") 94 if loglevel is not None: 95 logger.setLevel(loglevel) 96 return logger 97 98 def setup_logging_subsystem(self, loglevel=None, logfile=None, 99 format=None, colorize=None, **kwargs): 100 if Logging._setup: 101 return 102 loglevel = loglevel or self.loglevel 103 format = format or self.format 104 if colorize is None: 105 colorize = self.supports_color(logfile) 106 107 if mputil and hasattr(mputil, "_logger"): 108 mputil._logger = None 109 if not is_py3k: 110 ensure_process_aware_logger() 111 receivers = signals.setup_logging.send(sender=None, 112 loglevel=loglevel, logfile=logfile, 113 format=format, colorize=colorize) 114 if not receivers: 115 root = logging.getLogger() 116 117 if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: 118 root.handlers = [] 119 120 mp = mputil.get_logger() if mputil else None 121 for logger in filter(None, (root, mp)): 122 self._setup_logger(logger, logfile, format, colorize, **kwargs) 123 logger.setLevel(loglevel) 124 signals.after_setup_logger.send(sender=None, logger=logger, 125 loglevel=loglevel, logfile=logfile, 126 format=format, colorize=colorize) 127 Logging._setup = True 128 129 return receivers 130 131 def _detect_handler(self, logfile=None): 132 """Create log handler with either a filename, an open stream 133 or :const:`None` (stderr).""" 134 logfile = sys.__stderr__ if logfile is None else logfile 135 if hasattr(logfile, "write"): 136 return logging.StreamHandler(logfile) 137 return WatchedFileHandler(logfile) 138 139 def get_default_logger(self, loglevel=None, name="celery"): 140 """Get default logger instance. 141 142 :keyword loglevel: Initial log level. 
143 144 """ 145 logger = logging.getLogger(name) 146 if loglevel is not None: 147 logger.setLevel(loglevel) 148 return logger 149 150 def setup_logger(self, loglevel=None, logfile=None, 151 format=None, colorize=None, name="celery", root=True, 152 app=None, **kwargs): 153 """Setup the :mod:`multiprocessing` logger. 154 155 If `logfile` is not specified, then `sys.stderr` is used. 156 157 Returns logger object. 158 159 """ 160 loglevel = loglevel or self.loglevel 161 format = format or self.format 162 if colorize is None: 163 colorize = self.supports_color(logfile) 164 165 if not root or self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: 166 return self._setup_logger(self.get_default_logger(loglevel, name), 167 logfile, format, colorize, **kwargs) 168 self.setup_logging_subsystem(loglevel, logfile, 169 format, colorize, **kwargs) 170 return self.get_default_logger(name=name) 171 172 def setup_task_logger(self, loglevel=None, logfile=None, format=None, 173 colorize=None, task_name=None, task_id=None, propagate=False, 174 app=None, **kwargs): 175 """Setup the task logger. 176 177 If `logfile` is not specified, then `sys.stderr` is used. 178 179 Returns logger object. 180 181 """ 182 loglevel = loglevel or self.loglevel 183 format = format or self.task_format 184 if colorize is None: 185 colorize = self.supports_color(logfile) 186 187 logger = self._setup_logger(self.get_task_logger(loglevel, task_name), 188 logfile, format, colorize, **kwargs) 189 logger.propagate = int(propagate) # this is an int for some reason. 190 # better to not question why. 191 signals.after_setup_task_logger.send(sender=None, logger=logger, 192 loglevel=loglevel, logfile=logfile, 193 format=format, colorize=colorize) 194 return LoggerAdapter(logger, {"task_id": task_id, 195 "task_name": task_name}) 196 197 def redirect_stdouts_to_logger(self, logger, loglevel=None, 198 stdout=True, stderr=True): 199 """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a 200 logging instance. 
201 202 :param logger: The :class:`logging.Logger` instance to redirect to. 203 :param loglevel: The loglevel redirected messages will be logged as. 204 205 """ 206 proxy = LoggingProxy(logger, loglevel) 207 if stdout: 208 sys.stdout = proxy 209 if stderr: 210 sys.stderr = proxy 211 return proxy 212 213 def _setup_logger(self, logger, logfile, format, colorize, 214 formatter=ColorFormatter, **kwargs): 215 216 if logger.handlers: # Logger already configured 217 return logger 218 219 handler = self._detect_handler(logfile) 220 handler.setFormatter(formatter(format, use_color=colorize)) 221 logger.addHandler(handler) 222 return logger 223 224 225 get_default_logger = Proxy(lambda: current_app.log.get_default_logger) 226 setup_logger = Proxy(lambda: current_app.log.setup_logger) 227 setup_task_logger = Proxy(lambda: current_app.log.setup_task_logger) 228 get_task_logger = Proxy(lambda: current_app.log.get_task_logger) 229 setup_logging_subsystem = Proxy( 230 lambda: current_app.log.setup_logging_subsystem) 231 redirect_stdouts_to_logger = Proxy( 232 lambda: current_app.log.redirect_stdouts_to_logger) 233 234 235 class LoggingProxy(object): 236 """Forward file object to :class:`logging.Logger` instance. 237 238 :param logger: The :class:`logging.Logger` instance to forward to. 239 :param loglevel: Loglevel to use when writing messages. 
240 241 """ 242 mode = "w" 243 name = None 244 closed = False 245 loglevel = logging.ERROR 246 _thread = threading.local() 247 248 def __init__(self, logger, loglevel=None): 249 self.logger = logger 250 self.loglevel = loglevel or self.logger.level or self.loglevel 251 if not isinstance(self.loglevel, int): 252 self.loglevel = LOG_LEVELS[self.loglevel.upper()] 253 self._safewrap_handlers() 254 255 def _safewrap_handlers(self): 256 """Make the logger handlers dump internal errors to 257 `sys.__stderr__` instead of `sys.stderr` to circumvent 258 infinite loops.""" 259 260 def wrap_handler(handler): # pragma: no cover 261 262 class WithSafeHandleError(logging.Handler): 263 264 def handleError(self, record): 265 exc_info = sys.exc_info() 266 try: 267 try: 268 traceback.print_exception(exc_info[0], 269 exc_info[1], 270 exc_info[2], 271 None, sys.__stderr__) 272 except IOError: 273 pass # see python issue 5971 274 finally: 275 del(exc_info) 276 277 handler.handleError = WithSafeHandleError().handleError 278 279 return map(wrap_handler, self.logger.handlers) 280 281 def write(self, data): 282 if getattr(self._thread, "recurse_protection", False): 283 # Logger is logging back to this file, so stop recursing. 284 return 285 """Write message to logging object.""" 286 data = data.strip() 287 if data and not self.closed: 288 self._thread.recurse_protection = True 289 try: 290 self.logger.log(self.loglevel, safe_str(data)) 291 finally: 292 self._thread.recurse_protection = False 293 294 def writelines(self, sequence): 295 """`writelines(sequence_of_strings) -> None`. 296 297 Write the strings to the file. 298 299 The sequence can be any iterable object producing strings. 300 This is equivalent to calling :meth:`write` for each string. 
301 302 """ 303 for part in sequence: 304 self.write(part) 305 306 def flush(self): 307 """This object is not buffered so any :meth:`flush` requests 308 are ignored.""" 309 pass 310 311 def close(self): 312 """When the object is closed, no write requests are forwarded to 313 the logging object anymore.""" 314 self.closed = True 315 316 def isatty(self): 317 """Always returns :const:`False`. Just here for file support.""" 318 return False 319 320 def fileno(self): 321 return None 322 323 324 class SilenceRepeated(object): 325 """Only log action every n iterations.""" 326 327 def __init__(self, action, max_iterations=10): 328 self.action = action 329 self.max_iterations = max_iterations 330 self._iterations = 0 331 332 def __call__(self, *msgs): 333 if not self._iterations or self._iterations >= self.max_iterations: 334 for msg in msgs: 335 self.action(msg) 336 self._iterations = 0 337 else: 338 self._iterations += 1 339 [end of celery/log.py] [start of celery/platforms.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.platforms 4 ~~~~~~~~~~~~~~~~ 5 6 Utilities dealing with platform specifics: signals, daemonization, 7 users, groups, and so on. 8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 
11 12 """ 13 from __future__ import absolute_import 14 15 import errno 16 import os 17 import platform as _platform 18 import shlex 19 import signal as _signal 20 import sys 21 22 from .local import try_import 23 24 _setproctitle = try_import("setproctitle") 25 resource = try_import("resource") 26 pwd = try_import("pwd") 27 grp = try_import("grp") 28 29 SYSTEM = _platform.system() 30 IS_OSX = SYSTEM == "Darwin" 31 IS_WINDOWS = SYSTEM == "Windows" 32 33 DAEMON_UMASK = 0 34 DAEMON_WORKDIR = "/" 35 DAEMON_REDIRECT_TO = getattr(os, "devnull", "/dev/null") 36 37 38 def pyimplementation(): 39 if hasattr(_platform, "python_implementation"): 40 return _platform.python_implementation() 41 elif sys.platform.startswith("java"): 42 return "Jython %s" % (sys.platform, ) 43 elif hasattr(sys, "pypy_version_info"): 44 v = ".".join(map(str, sys.pypy_version_info[:3])) 45 if sys.pypy_version_info[3:]: 46 v += "-" + "".join(map(str, sys.pypy_version_info[3:])) 47 return "PyPy %s" % (v, ) 48 else: 49 return "CPython" 50 51 52 class LockFailed(Exception): 53 """Raised if a pidlock can't be acquired.""" 54 pass 55 56 57 def get_fdmax(default=None): 58 """Returns the maximum number of open file descriptors 59 on this system. 60 61 :keyword default: Value returned if there's no file 62 descriptor limit. 63 64 """ 65 fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] 66 if fdmax == resource.RLIM_INFINITY: 67 return default 68 return fdmax 69 70 71 class PIDFile(object): 72 """PID lock file. 73 74 This is the type returned by :func:`create_pidlock`. 75 76 **Should not be used directly, use the :func:`create_pidlock` 77 context instead** 78 79 """ 80 81 #: Path to the pid lock file. 
82 path = None 83 84 def __init__(self, path): 85 self.path = os.path.abspath(path) 86 87 def acquire(self): 88 """Acquire lock.""" 89 try: 90 self.write_pid() 91 except OSError, exc: 92 raise LockFailed(str(exc)) 93 return self 94 __enter__ = acquire 95 96 def is_locked(self): 97 """Returns true if the pid lock exists.""" 98 return os.path.exists(self.path) 99 100 def release(self, *args): 101 """Release lock.""" 102 self.remove() 103 __exit__ = release 104 105 def read_pid(self): 106 """Reads and returns the current pid.""" 107 try: 108 fh = open(self.path, "r") 109 except IOError, exc: 110 if exc.errno == errno.ENOENT: 111 return 112 raise 113 114 line = fh.readline().strip() 115 fh.close() 116 117 try: 118 return int(line) 119 except ValueError: 120 raise ValueError("PID file %r contents invalid." % self.path) 121 122 def remove(self): 123 """Removes the lock.""" 124 try: 125 os.unlink(self.path) 126 except OSError, exc: 127 if exc.errno in (errno.ENOENT, errno.EACCES): 128 return 129 raise 130 131 def remove_if_stale(self): 132 """Removes the lock if the process is not running. 133 (does not respond to signals).""" 134 try: 135 pid = self.read_pid() 136 except ValueError, exc: 137 sys.stderr.write("Broken pidfile found. Removing it.\n") 138 self.remove() 139 return True 140 if not pid: 141 self.remove() 142 return True 143 144 try: 145 os.kill(pid, 0) 146 except os.error, exc: 147 if exc.errno == errno.ESRCH: 148 sys.stderr.write("Stale pidfile exists. Removing it.\n") 149 self.remove() 150 return True 151 return False 152 153 def write_pid(self): 154 open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) 155 open_mode = (((os.R_OK | os.W_OK) << 6) | 156 ((os.R_OK) << 3) | 157 ((os.R_OK))) 158 pidfile_fd = os.open(self.path, open_flags, open_mode) 159 pidfile = os.fdopen(pidfile_fd, "w") 160 try: 161 pid = os.getpid() 162 pidfile.write("%d\n" % (pid, )) 163 finally: 164 pidfile.close() 165 166 167 def create_pidlock(pidfile): 168 """Create and verify pid file. 
169 170 If the pid file already exists the program exits with an error message, 171 however if the process it refers to is not running anymore, the pid file 172 is deleted and the program continues. 173 174 The caller is responsible for releasing the lock before the program 175 exits. 176 177 :returns: :class:`PIDFile`. 178 179 **Example**: 180 181 .. code-block:: python 182 183 import atexit 184 pidlock = create_pidlock("/var/run/app.pid").acquire() 185 atexit.register(pidlock.release) 186 187 """ 188 189 pidlock = PIDFile(pidfile) 190 if pidlock.is_locked() and not pidlock.remove_if_stale(): 191 raise SystemExit( 192 "ERROR: Pidfile (%s) already exists.\n" 193 "Seems we're already running? (PID: %s)" % ( 194 pidfile, pidlock.read_pid())) 195 return pidlock 196 197 198 class DaemonContext(object): 199 _is_open = False 200 workdir = DAEMON_WORKDIR 201 umask = DAEMON_UMASK 202 203 def __init__(self, pidfile=None, workdir=None, 204 umask=None, **kwargs): 205 self.workdir = workdir or self.workdir 206 self.umask = self.umask if umask is None else umask 207 208 def open(self): 209 if not self._is_open: 210 self._detach() 211 212 os.chdir(self.workdir) 213 os.umask(self.umask) 214 215 for fd in reversed(range(get_fdmax(default=2048))): 216 try: 217 os.close(fd) 218 except OSError, exc: 219 if exc.errno != errno.EBADF: 220 raise 221 222 os.open(DAEMON_REDIRECT_TO, os.O_RDWR) 223 os.dup2(0, 1) 224 os.dup2(0, 2) 225 226 self._is_open = True 227 __enter__ = open 228 229 def close(self, *args): 230 if self._is_open: 231 self._is_open = False 232 __exit__ = close 233 234 def _detach(self): 235 if os.fork() == 0: # first child 236 os.setsid() # create new session 237 if os.fork() > 0: # second child 238 os._exit(0) 239 else: 240 os._exit(0) 241 return self 242 243 244 def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, 245 workdir=None, **opts): 246 """Detach the current process in the background (daemonize). 247 248 :keyword logfile: Optional log file. 
The ability to write to this file 249 will be verified before the process is detached. 250 :keyword pidfile: Optional pid file. The pid file will not be created, 251 as this is the responsibility of the child. But the process will 252 exit if the pid lock exists and the pid written is still running. 253 :keyword uid: Optional user id or user name to change 254 effective privileges to. 255 :keyword gid: Optional group id or group name to change effective 256 privileges to. 257 :keyword umask: Optional umask that will be effective in the child process. 258 :keyword workdir: Optional new working directory. 259 :keyword \*\*opts: Ignored. 260 261 **Example**: 262 263 .. code-block:: python 264 265 import atexit 266 from celery.platforms import detached, create_pidlock 267 268 with detached(logfile="/var/log/app.log", pidfile="/var/run/app.pid", 269 uid="nobody"): 270 # Now in detached child process with effective user set to nobody, 271 # and we know that our logfile can be written to, and that 272 # the pidfile is not locked. 273 pidlock = create_pidlock("/var/run/app.pid").acquire() 274 atexit.register(pidlock.release) 275 276 # Run the program 277 program.run(logfile="/var/log/app.log") 278 279 """ 280 281 if not resource: 282 raise RuntimeError("This platform does not support detach.") 283 workdir = os.getcwd() if workdir is None else workdir 284 285 signals.reset("SIGCLD") # Make sure SIGCLD is using the default handler. 286 set_effective_user(uid=uid, gid=gid) 287 288 # Since without stderr any errors will be silently suppressed, 289 # we need to know that we have access to the logfile. 290 logfile and open(logfile, "a").close() 291 # Doesn't actually create the pidfile, but makes sure it's not stale. 292 pidfile and create_pidlock(pidfile) 293 294 return DaemonContext(umask=umask, workdir=workdir) 295 296 297 def parse_uid(uid): 298 """Parse user id. 
299 300 uid can be an integer (uid) or a string (user name), if a user name 301 the uid is taken from the password file. 302 303 """ 304 try: 305 return int(uid) 306 except ValueError: 307 if pwd: 308 try: 309 return pwd.getpwnam(uid).pw_uid 310 except KeyError: 311 raise KeyError("User does not exist: %r" % (uid, )) 312 raise 313 314 315 def parse_gid(gid): 316 """Parse group id. 317 318 gid can be an integer (gid) or a string (group name), if a group name 319 the gid is taken from the password file. 320 321 """ 322 try: 323 return int(gid) 324 except ValueError: 325 if grp: 326 try: 327 return grp.getgrnam(gid).gr_gid 328 except KeyError: 329 raise KeyError("Group does not exist: %r" % (gid, )) 330 raise 331 332 333 def setegid(gid): 334 """Set effective group id.""" 335 gid = parse_gid(gid) 336 if gid != os.getegid(): 337 os.setegid(gid) 338 339 340 def seteuid(uid): 341 """Set effective user id.""" 342 uid = parse_uid(uid) 343 if uid != os.geteuid(): 344 os.seteuid(uid) 345 346 347 def setgid(gid): 348 os.setgid(parse_gid(gid)) 349 350 351 def setuid(uid): 352 os.setuid(parse_uid(uid)) 353 354 355 def set_effective_user(uid=None, gid=None): 356 """Change process privileges to new user/group. 357 358 If UID and GID is set the effective user/group is set. 359 360 If only UID is set, the effective user is set, and the group is 361 set to the users primary group. 362 363 If only GID is set, the effective group is set. 364 365 """ 366 uid = uid and parse_uid(uid) 367 gid = gid and parse_gid(gid) 368 369 if uid: 370 # If GID isn't defined, get the primary GID of the user. 371 if not gid and pwd: 372 gid = pwd.getpwuid(uid).pw_gid 373 setgid(gid) 374 setuid(uid) 375 else: 376 gid and setgid(gid) 377 378 379 class Signals(object): 380 """Convenience interface to :mod:`signals`. 381 382 If the requested signal is not supported on the current platform, 383 the operation will be ignored. 384 385 **Examples**: 386 387 .. 
code-block:: python 388 389 >>> from celery.platforms import signals 390 391 >>> signals["INT"] = my_handler 392 393 >>> signals["INT"] 394 my_handler 395 396 >>> signals.supported("INT") 397 True 398 399 >>> signals.signum("INT") 400 2 401 402 >>> signals.ignore("USR1") 403 >>> signals["USR1"] == signals.ignored 404 True 405 406 >>> signals.reset("USR1") 407 >>> signals["USR1"] == signals.default 408 True 409 410 >>> signals.update(INT=exit_handler, 411 ... TERM=exit_handler, 412 ... HUP=hup_handler) 413 414 """ 415 416 ignored = _signal.SIG_IGN 417 default = _signal.SIG_DFL 418 419 def supported(self, signal_name): 420 """Returns true value if ``signal_name`` exists on this platform.""" 421 try: 422 return self.signum(signal_name) 423 except AttributeError: 424 pass 425 426 def signum(self, signal_name): 427 """Get signal number from signal name.""" 428 if isinstance(signal_name, int): 429 return signal_name 430 if not isinstance(signal_name, basestring) \ 431 or not signal_name.isupper(): 432 raise TypeError("signal name must be uppercase string.") 433 if not signal_name.startswith("SIG"): 434 signal_name = "SIG" + signal_name 435 return getattr(_signal, signal_name) 436 437 def reset(self, *signal_names): 438 """Reset signals to the default signal handler. 439 440 Does nothing if the platform doesn't support signals, 441 or the specified signal in particular. 442 443 """ 444 self.update((sig, self.default) for sig in signal_names) 445 446 def ignore(self, *signal_names): 447 """Ignore signal using :const:`SIG_IGN`. 448 449 Does nothing if the platform doesn't support signals, 450 or the specified signal in particular. 451 452 """ 453 self.update((sig, self.ignored) for sig in signal_names) 454 455 def __getitem__(self, signal_name): 456 return _signal.getsignal(self.signum(signal_name)) 457 458 def __setitem__(self, signal_name, handler): 459 """Install signal handler. 
460 461 Does nothing if the current platform doesn't support signals, 462 or the specified signal in particular. 463 464 """ 465 try: 466 _signal.signal(self.signum(signal_name), handler) 467 except (AttributeError, ValueError): 468 pass 469 470 def update(self, _d_=None, **sigmap): 471 """Set signal handlers from a mapping.""" 472 for signal_name, handler in dict(_d_ or {}, **sigmap).iteritems(): 473 self[signal_name] = handler 474 475 476 signals = Signals() 477 get_signal = signals.signum # compat 478 install_signal_handler = signals.__setitem__ # compat 479 reset_signal = signals.reset # compat 480 ignore_signal = signals.ignore # compat 481 482 483 def strargv(argv): 484 arg_start = 2 if "manage" in argv[0] else 1 485 if len(argv) > arg_start: 486 return " ".join(argv[arg_start:]) 487 return "" 488 489 490 def set_process_title(progname, info=None): 491 """Set the ps name for the currently running process. 492 493 Only works if :mod:`setproctitle` is installed. 494 495 """ 496 proctitle = "[%s]" % progname 497 proctitle = "%s %s" % (proctitle, info) if info else proctitle 498 if _setproctitle: 499 _setproctitle.setproctitle(proctitle) 500 return proctitle 501 502 503 def set_mp_process_title(progname, info=None, hostname=None): 504 """Set the ps name using the multiprocessing process name. 505 506 Only works if :mod:`setproctitle` is installed. 
507 508 """ 509 if hostname: 510 progname = "%s@%s" % (progname, hostname.split(".")[0]) 511 try: 512 from multiprocessing.process import current_process 513 except ImportError: 514 return set_process_title(progname, info=info) 515 else: 516 return set_process_title("%s:%s" % (progname, 517 current_process().name), info=info) 518 519 520 def shellsplit(s, posix=True): 521 # posix= option to shlex.split first available in Python 2.6+ 522 lexer = shlex.shlex(s, posix=not IS_WINDOWS) 523 lexer.whitespace_split = True 524 lexer.commenters = '' 525 return list(lexer) 526 [end of celery/platforms.py] [start of celery/registry.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.registry 4 ~~~~~~~~~~~~~~~ 5 6 Registry of available tasks. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 import inspect 15 16 from .exceptions import NotRegistered 17 18 19 class TaskRegistry(dict): 20 21 NotRegistered = NotRegistered 22 23 def regular(self): 24 """Get all regular task types.""" 25 return self.filter_types("regular") 26 27 def periodic(self): 28 """Get all periodic task types.""" 29 return self.filter_types("periodic") 30 31 def register(self, task): 32 """Register a task in the task registry. 33 34 The task will be automatically instantiated if not already an 35 instance. 36 37 """ 38 self[task.name] = inspect.isclass(task) and task() or task 39 40 def unregister(self, name): 41 """Unregister task by name. 42 43 :param name: name of the task to unregister, or a 44 :class:`celery.task.base.Task` with a valid `name` attribute. 45 46 :raises celery.exceptions.NotRegistered: if the task has not 47 been registered. 
48 49 """ 50 try: 51 # Might be a task class 52 name = name.name 53 except AttributeError: 54 pass 55 self.pop(name) 56 57 def filter_types(self, type): 58 """Return all tasks of a specific type.""" 59 return dict((name, task) for name, task in self.iteritems() 60 if task.type == type) 61 62 def __getitem__(self, key): 63 try: 64 return dict.__getitem__(self, key) 65 except KeyError: 66 raise self.NotRegistered(key) 67 68 def pop(self, key, *args): 69 try: 70 return dict.pop(self, key, *args) 71 except KeyError: 72 raise self.NotRegistered(key) 73 74 75 #: Global task registry. 76 tasks = TaskRegistry() 77 78 79 def _unpickle_task(name): 80 return tasks[name] 81 [end of celery/registry.py] [start of celery/result.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.result 4 ~~~~~~~~~~~~~ 5 6 Task results/state and groups of results. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 from __future__ import with_statement 14 15 import time 16 17 from copy import copy 18 from itertools import imap 19 20 from . import current_app 21 from . import states 22 from .app import app_or_default 23 from .exceptions import TimeoutError 24 from .registry import _unpickle_task 25 from .utils.compat import OrderedDict 26 27 28 def _unpickle_result(task_id, task_name): 29 return _unpickle_task(task_name).AsyncResult(task_id) 30 31 32 class BaseAsyncResult(object): 33 """Base class for pending result, supports custom task result backend. 34 35 :param task_id: see :attr:`task_id`. 36 :param backend: see :attr:`backend`. 37 38 """ 39 40 #: Error raised for timeouts. 41 TimeoutError = TimeoutError 42 43 #: The task uuid. 44 task_id = None 45 46 #: The task result backend to use. 
47 backend = None 48 49 def __init__(self, task_id, backend, task_name=None, app=None): 50 self.app = app_or_default(app) 51 self.task_id = task_id 52 self.backend = backend 53 self.task_name = task_name 54 55 def forget(self): 56 """Forget about (and possibly remove the result of) this task.""" 57 self.backend.forget(self.task_id) 58 59 def revoke(self, connection=None, connect_timeout=None): 60 """Send revoke signal to all workers. 61 62 Any worker receiving the task, or having reserved the 63 task, *must* ignore it. 64 65 """ 66 self.app.control.revoke(self.task_id, connection=connection, 67 connect_timeout=connect_timeout) 68 69 def get(self, timeout=None, propagate=True, interval=0.5): 70 """Wait until task is ready, and return its result. 71 72 .. warning:: 73 74 Waiting for tasks within a task may lead to deadlocks. 75 Please read :ref:`task-synchronous-subtasks`. 76 77 :keyword timeout: How long to wait, in seconds, before the 78 operation times out. 79 :keyword propagate: Re-raise exception if the task failed. 80 :keyword interval: Time to wait (in seconds) before retrying to 81 retrieve the result. Note that this does not have any effect 82 when using the AMQP result store backend, as it does not 83 use polling. 84 85 :raises celery.exceptions.TimeoutError: if `timeout` is not 86 :const:`None` and the result does not arrive within `timeout` 87 seconds. 88 89 If the remote call raised an exception then that exception will 90 be re-raised. 91 92 """ 93 return self.backend.wait_for(self.task_id, timeout=timeout, 94 propagate=propagate, 95 interval=interval) 96 97 def wait(self, *args, **kwargs): 98 """Deprecated alias to :meth:`get`.""" 99 return self.get(*args, **kwargs) 100 101 def ready(self): 102 """Returns :const:`True` if the task has been executed. 103 104 If the task is still running, pending, or is waiting 105 for retry then :const:`False` is returned. 
106 107 """ 108 return self.status in self.backend.READY_STATES 109 110 def successful(self): 111 """Returns :const:`True` if the task executed successfully.""" 112 return self.status == states.SUCCESS 113 114 def failed(self): 115 """Returns :const:`True` if the task failed.""" 116 return self.status == states.FAILURE 117 118 def __str__(self): 119 """`str(self) -> self.task_id`""" 120 return self.task_id 121 122 def __hash__(self): 123 """`hash(self) -> hash(self.task_id)`""" 124 return hash(self.task_id) 125 126 def __repr__(self): 127 return "<AsyncResult: %s>" % self.task_id 128 129 def __eq__(self, other): 130 if isinstance(other, self.__class__): 131 return self.task_id == other.task_id 132 return other == self.task_id 133 134 def __copy__(self): 135 return self.__class__(self.task_id, backend=self.backend) 136 137 def __reduce__(self): 138 if self.task_name: 139 return (_unpickle_result, (self.task_id, self.task_name)) 140 else: 141 return (self.__class__, (self.task_id, self.backend, 142 None, self.app)) 143 144 @property 145 def result(self): 146 """When the task has been executed, this contains the return value. 147 If the task raised an exception, this will be the exception 148 instance.""" 149 return self.backend.get_result(self.task_id) 150 151 @property 152 def info(self): 153 """Get state metadata. Alias to :meth:`result`.""" 154 return self.result 155 156 @property 157 def traceback(self): 158 """Get the traceback of a failed task.""" 159 return self.backend.get_traceback(self.task_id) 160 161 @property 162 def state(self): 163 """The tasks current state. 164 165 Possible values includes: 166 167 *PENDING* 168 169 The task is waiting for execution. 170 171 *STARTED* 172 173 The task has been started. 174 175 *RETRY* 176 177 The task is to be retried, possibly because of failure. 178 179 *FAILURE* 180 181 The task raised an exception, or has exceeded the retry limit. 
182 The :attr:`result` attribute then contains the 183 exception raised by the task. 184 185 *SUCCESS* 186 187 The task executed successfully. The :attr:`result` attribute 188 then contains the tasks return value. 189 190 """ 191 return self.backend.get_status(self.task_id) 192 193 @property 194 def status(self): 195 """Deprecated alias of :attr:`state`.""" 196 return self.state 197 198 199 class AsyncResult(BaseAsyncResult): 200 """Pending task result using the default backend. 201 202 :param task_id: The task uuid. 203 204 """ 205 206 #: Task result store backend to use. 207 backend = None 208 209 def __init__(self, task_id, backend=None, task_name=None, app=None): 210 app = app_or_default(app) 211 backend = backend or app.backend 212 super(AsyncResult, self).__init__(task_id, backend, 213 task_name=task_name, app=app) 214 215 216 class ResultSet(object): 217 """Working with more than one result. 218 219 :param results: List of result instances. 220 221 """ 222 223 #: List of results in in the set. 224 results = None 225 226 def __init__(self, results, app=None, **kwargs): 227 self.app = app_or_default(app) 228 self.results = results 229 230 def add(self, result): 231 """Add :class:`AsyncResult` as a new member of the set. 232 233 Does nothing if the result is already a member. 234 235 """ 236 if result not in self.results: 237 self.results.append(result) 238 239 def remove(self, result): 240 """Removes result from the set; it must be a member. 241 242 :raises KeyError: if the result is not a member. 243 244 """ 245 if isinstance(result, basestring): 246 result = AsyncResult(result) 247 try: 248 self.results.remove(result) 249 except ValueError: 250 raise KeyError(result) 251 252 def discard(self, result): 253 """Remove result from the set if it is a member. 254 255 If it is not a member, do nothing. 
256 257 """ 258 try: 259 self.remove(result) 260 except KeyError: 261 pass 262 263 def update(self, results): 264 """Update set with the union of itself and an iterable with 265 results.""" 266 self.results.extend(r for r in results if r not in self.results) 267 268 def clear(self): 269 """Remove all results from this set.""" 270 self.results[:] = [] # don't create new list. 271 272 def successful(self): 273 """Was all of the tasks successful? 274 275 :returns: :const:`True` if all of the tasks finished 276 successfully (i.e. did not raise an exception). 277 278 """ 279 return all(result.successful() for result in self.results) 280 281 def failed(self): 282 """Did any of the tasks fail? 283 284 :returns: :const:`True` if any of the tasks failed. 285 (i.e., raised an exception) 286 287 """ 288 return any(result.failed() for result in self.results) 289 290 def waiting(self): 291 """Are any of the tasks incomplete? 292 293 :returns: :const:`True` if any of the tasks is still 294 waiting for execution. 295 296 """ 297 return any(not result.ready() for result in self.results) 298 299 def ready(self): 300 """Did all of the tasks complete? (either by success of failure). 301 302 :returns: :const:`True` if all of the tasks been 303 executed. 304 305 """ 306 return all(result.ready() for result in self.results) 307 308 def completed_count(self): 309 """Task completion count. 310 311 :returns: the number of tasks completed. 
312 313 """ 314 return sum(imap(int, (result.successful() for result in self.results))) 315 316 def forget(self): 317 """Forget about (and possible remove the result of) all the tasks.""" 318 for result in self.results: 319 result.forget() 320 321 def revoke(self, connection=None, connect_timeout=None): 322 """Revoke all tasks in the set.""" 323 with self.app.default_connection(connection, connect_timeout) as conn: 324 for result in self.results: 325 result.revoke(connection=conn) 326 327 def __iter__(self): 328 return self.iterate() 329 330 def __getitem__(self, index): 331 """`res[i] -> res.results[i]`""" 332 return self.results[index] 333 334 def iterate(self, timeout=None, propagate=True, interval=0.5): 335 """Iterate over the return values of the tasks as they finish 336 one by one. 337 338 :raises: The exception if any of the tasks raised an exception. 339 340 """ 341 elapsed = 0.0 342 results = OrderedDict((result.task_id, copy(result)) 343 for result in self.results) 344 345 while results: 346 removed = set() 347 for task_id, result in results.iteritems(): 348 if result.ready(): 349 yield result.get(timeout=timeout and timeout - elapsed, 350 propagate=propagate) 351 removed.add(task_id) 352 else: 353 if result.backend.subpolling_interval: 354 time.sleep(result.backend.subpolling_interval) 355 for task_id in removed: 356 results.pop(task_id, None) 357 time.sleep(interval) 358 elapsed += interval 359 if timeout and elapsed >= timeout: 360 raise TimeoutError("The operation timed out") 361 362 def join(self, timeout=None, propagate=True, interval=0.5): 363 """Gathers the results of all tasks as a list in order. 364 365 .. note:: 366 367 This can be an expensive operation for result store 368 backends that must resort to polling (e.g. database). 369 370 You should consider using :meth:`join_native` if your backend 371 supports it. 372 373 .. warning:: 374 375 Waiting for tasks within a task may lead to deadlocks. 376 Please see :ref:`task-synchronous-subtasks`. 
377 378 :keyword timeout: The number of seconds to wait for results before 379 the operation times out. 380 381 :keyword propagate: If any of the tasks raises an exception, the 382 exception will be re-raised. 383 384 :keyword interval: Time to wait (in seconds) before retrying to 385 retrieve a result from the set. Note that this 386 does not have any effect when using the AMQP 387 result store backend, as it does not use polling. 388 389 :raises celery.exceptions.TimeoutError: if `timeout` is not 390 :const:`None` and the operation takes longer than `timeout` 391 seconds. 392 393 """ 394 time_start = time.time() 395 remaining = None 396 397 results = [] 398 for result in self.results: 399 remaining = None 400 if timeout: 401 remaining = timeout - (time.time() - time_start) 402 if remaining <= 0.0: 403 raise TimeoutError("join operation timed out") 404 results.append(result.wait(timeout=remaining, 405 propagate=propagate, 406 interval=interval)) 407 return results 408 409 def iter_native(self, timeout=None, interval=None): 410 """Backend optimized version of :meth:`iterate`. 411 412 .. versionadded:: 2.2 413 414 Note that this does not support collecting the results 415 for different task types using different backends. 416 417 This is currently only supported by the AMQP, Redis and cache 418 result backends. 419 420 """ 421 backend = self.results[0].backend 422 ids = [result.task_id for result in self.results] 423 return backend.get_many(ids, timeout=timeout, interval=interval) 424 425 def join_native(self, timeout=None, propagate=True, interval=0.5): 426 """Backend optimized version of :meth:`join`. 427 428 .. versionadded:: 2.2 429 430 Note that this does not support collecting the results 431 for different task types using different backends. 432 433 This is currently only supported by the AMQP, Redis and cache 434 result backends. 
435 436 """ 437 results = self.results 438 acc = [None for _ in xrange(self.total)] 439 for task_id, meta in self.iter_native(timeout=timeout, 440 interval=interval): 441 acc[results.index(task_id)] = meta["result"] 442 return acc 443 444 @property 445 def total(self): 446 """Total number of tasks in the set.""" 447 return len(self.results) 448 449 @property 450 def subtasks(self): 451 """Deprecated alias to :attr:`results`.""" 452 return self.results 453 454 455 class TaskSetResult(ResultSet): 456 """An instance of this class is returned by 457 `TaskSet`'s :meth:`~celery.task.TaskSet.apply_async` method. 458 459 It enables inspection of the tasks state and return values as 460 a single entity. 461 462 :param taskset_id: The id of the taskset. 463 :param results: List of result instances. 464 465 """ 466 467 #: The UUID of the taskset. 468 taskset_id = None 469 470 #: List/iterator of results in the taskset 471 results = None 472 473 def __init__(self, taskset_id, results=None, **kwargs): 474 self.taskset_id = taskset_id 475 476 # XXX previously the "results" arg was named "subtasks". 477 if "subtasks" in kwargs: 478 results = kwargs["subtasks"] 479 super(TaskSetResult, self).__init__(results, **kwargs) 480 481 def save(self, backend=None): 482 """Save taskset result for later retrieval using :meth:`restore`. 483 484 Example:: 485 486 >>> result.save() 487 >>> result = TaskSetResult.restore(taskset_id) 488 489 """ 490 return (backend or self.app.backend).save_taskset(self.taskset_id, 491 self) 492 493 def delete(self, backend=None): 494 """Remove this result if it was previously saved.""" 495 (backend or self.app.backend).delete_taskset(self.taskset_id) 496 497 @classmethod 498 def restore(self, taskset_id, backend=None): 499 """Restore previously saved taskset result.""" 500 return (backend or current_app.backend).restore_taskset(taskset_id) 501 502 def itersubtasks(self): 503 """Depreacted. 
Use ``iter(self.results)`` instead.""" 504 return iter(self.results) 505 506 def __reduce__(self): 507 return (self.__class__, (self.taskset_id, self.results)) 508 509 510 class EagerResult(BaseAsyncResult): 511 """Result that we know has already been executed.""" 512 TimeoutError = TimeoutError 513 514 def __init__(self, task_id, ret_value, state, traceback=None): 515 self.task_id = task_id 516 self._result = ret_value 517 self._state = state 518 self._traceback = traceback 519 520 def __reduce__(self): 521 return (self.__class__, (self.task_id, self._result, 522 self._state, self._traceback)) 523 524 def __copy__(self): 525 cls, args = self.__reduce__() 526 return cls(*args) 527 528 def successful(self): 529 """Returns :const:`True` if the task executed without failure.""" 530 return self.state == states.SUCCESS 531 532 def ready(self): 533 """Returns :const:`True` if the task has been executed.""" 534 return True 535 536 def get(self, timeout=None, propagate=True, **kwargs): 537 """Wait until the task has been executed and return its result.""" 538 if self.state == states.SUCCESS: 539 return self.result 540 elif self.state in states.PROPAGATE_STATES: 541 if propagate: 542 raise self.result 543 return self.result 544 545 def revoke(self): 546 self._state = states.REVOKED 547 548 def __repr__(self): 549 return "<EagerResult: %s>" % self.task_id 550 551 @property 552 def result(self): 553 """The tasks return value""" 554 return self._result 555 556 @property 557 def state(self): 558 """The tasks state.""" 559 return self._state 560 561 @property 562 def traceback(self): 563 """The traceback if the task failed.""" 564 return self._traceback 565 566 @property 567 def status(self): 568 """The tasks status (alias to :attr:`state`).""" 569 return self._state 570 [end of celery/result.py] [start of celery/schedules.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.schedules 4 ~~~~~~~~~~~~~~~~ 5 6 Schedules define the intervals at which periodic tasks 7 should run. 
8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 11 12 """ 13 from __future__ import absolute_import 14 15 import re 16 17 from datetime import datetime, timedelta 18 from dateutil.relativedelta import relativedelta 19 20 from .utils import is_iterable 21 from .utils.timeutils import (timedelta_seconds, weekday, maybe_timedelta, 22 remaining, humanize_seconds) 23 24 25 class ParseException(Exception): 26 """Raised by crontab_parser when the input can't be parsed.""" 27 28 29 class schedule(object): 30 relative = False 31 32 def __init__(self, run_every=None, relative=False): 33 self.run_every = maybe_timedelta(run_every) 34 self.relative = relative 35 36 def remaining_estimate(self, last_run_at): 37 """Returns when the periodic task should run next as a timedelta.""" 38 return remaining(last_run_at, self.run_every, relative=self.relative) 39 40 def is_due(self, last_run_at): 41 """Returns tuple of two items `(is_due, next_time_to_run)`, 42 where next time to run is in seconds. 43 44 e.g. 45 46 * `(True, 20)`, means the task should be run now, and the next 47 time to run is in 20 seconds. 48 49 * `(False, 12)`, means the task should be run in 12 seconds. 50 51 You can override this to decide the interval at runtime, 52 but keep in mind the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, 53 which decides the maximum number of seconds celerybeat can sleep 54 between re-checking the periodic task intervals. So if you 55 dynamically change the next run at value, and the max interval is 56 set to 5 minutes, it will take 5 minutes for the change to take 57 effect, so you may consider lowering the value of 58 :setting:`CELERYBEAT_MAX_LOOP_INTERVAL` if responsiveness is of 59 importance to you. 
60 61 """ 62 rem_delta = self.remaining_estimate(last_run_at) 63 rem = timedelta_seconds(rem_delta) 64 if rem == 0: 65 return True, self.seconds 66 return False, rem 67 68 def __repr__(self): 69 return "<freq: %s>" % self.human_seconds 70 71 def __eq__(self, other): 72 if isinstance(other, schedule): 73 return self.run_every == other.run_every 74 return self.run_every == other 75 76 @property 77 def seconds(self): 78 return timedelta_seconds(self.run_every) 79 80 @property 81 def human_seconds(self): 82 return humanize_seconds(self.seconds) 83 84 85 class crontab_parser(object): 86 """Parser for crontab expressions. Any expression of the form 'groups' 87 (see BNF grammar below) is accepted and expanded to a set of numbers. 88 These numbers represent the units of time that the crontab needs to 89 run on:: 90 91 digit :: '0'..'9' 92 dow :: 'a'..'z' 93 number :: digit+ | dow+ 94 steps :: number 95 range :: number ( '-' number ) ? 96 numspec :: '*' | range 97 expr :: numspec ( '/' steps ) ? 98 groups :: expr ( ',' expr ) * 99 100 The parser is a general purpose one, useful for parsing hours, minutes and 101 day_of_week expressions. Example usage:: 102 103 >>> minutes = crontab_parser(60).parse("*/15") 104 [0, 15, 30, 45] 105 >>> hours = crontab_parser(24).parse("*/4") 106 [0, 4, 8, 12, 16, 20] 107 >>> day_of_week = crontab_parser(7).parse("*") 108 [0, 1, 2, 3, 4, 5, 6] 109 110 """ 111 ParseException = ParseException 112 113 _range = r'(\w+?)-(\w+)' 114 _steps = r'/(\w+)?' 
115 _star = r'\*' 116 117 def __init__(self, max_=60): 118 self.max_ = max_ 119 self.pats = ( 120 (re.compile(self._range + self._steps), self._range_steps), 121 (re.compile(self._range), self._expand_range), 122 (re.compile(self._star + self._steps), self._star_steps), 123 (re.compile('^' + self._star + '$'), self._expand_star)) 124 125 def parse(self, spec): 126 acc = set() 127 for part in spec.split(','): 128 if not part: 129 raise self.ParseException("empty part") 130 acc |= set(self._parse_part(part)) 131 return acc 132 133 def _parse_part(self, part): 134 for regex, handler in self.pats: 135 m = regex.match(part) 136 if m: 137 return handler(m.groups()) 138 return self._expand_range((part, )) 139 140 def _expand_range(self, toks): 141 fr = self._expand_number(toks[0]) 142 if len(toks) > 1: 143 to = self._expand_number(toks[1]) 144 return range(fr, min(to + 1, self.max_ + 1)) 145 return [fr] 146 147 def _range_steps(self, toks): 148 if len(toks) != 3 or not toks[2]: 149 raise self.ParseException("empty filter") 150 return self._filter_steps(self._expand_range(toks[:2]), int(toks[2])) 151 152 def _star_steps(self, toks): 153 if not toks or not toks[0]: 154 raise self.ParseException("empty filter") 155 return self._filter_steps(self._expand_star(), int(toks[0])) 156 157 def _filter_steps(self, numbers, steps): 158 return [n for n in numbers if n % steps == 0] 159 160 def _expand_star(self, *args): 161 return range(self.max_) 162 163 def _expand_number(self, s): 164 if isinstance(s, basestring) and s[0] == '-': 165 raise self.ParseException("negative numbers not supported") 166 try: 167 i = int(s) 168 except ValueError: 169 try: 170 i = weekday(s) 171 except KeyError: 172 raise ValueError("Invalid weekday literal '%s'." % s) 173 return i 174 175 176 class crontab(schedule): 177 """A crontab can be used as the `run_every` value of a 178 :class:`PeriodicTask` to add cron-like scheduling. 
179 180 Like a :manpage:`cron` job, you can specify units of time of when 181 you would like the task to execute. It is a reasonably complete 182 implementation of cron's features, so it should provide a fair 183 degree of scheduling needs. 184 185 You can specify a minute, an hour, and/or a day of the week in any 186 of the following formats: 187 188 .. attribute:: minute 189 190 - A (list of) integers from 0-59 that represent the minutes of 191 an hour of when execution should occur; or 192 - A string representing a crontab pattern. This may get pretty 193 advanced, like `minute="*/15"` (for every quarter) or 194 `minute="1,13,30-45,50-59/2"`. 195 196 .. attribute:: hour 197 198 - A (list of) integers from 0-23 that represent the hours of 199 a day of when execution should occur; or 200 - A string representing a crontab pattern. This may get pretty 201 advanced, like `hour="*/3"` (for every three hours) or 202 `hour="0,8-17/2"` (at midnight, and every two hours during 203 office hours). 204 205 .. attribute:: day_of_week 206 207 - A (list of) integers from 0-6, where Sunday = 0 and Saturday = 208 6, that represent the days of a week that execution should 209 occur. 210 - A string representing a crontab pattern. This may get pretty 211 advanced, like `day_of_week="mon-fri"` (for weekdays only). 212 (Beware that `day_of_week="*/2"` does not literally mean 213 "every two days", but "every day that is divisible by two"!) 214 215 """ 216 217 @staticmethod 218 def _expand_cronspec(cronspec, max_): 219 """Takes the given cronspec argument in one of the forms:: 220 221 int (like 7) 222 basestring (like '3-5,*/15', '*', or 'monday') 223 set (like set([0,15,30,45])) 224 list (like [8-17]) 225 226 And convert it to an (expanded) set representing all time unit 227 values on which the crontab triggers. Only in case of the base 228 type being 'basestring', parsing occurs. 
(It is fast and 229 happens only once for each crontab instance, so there is no 230 significant performance overhead involved.) 231 232 For the other base types, merely Python type conversions happen. 233 234 The argument `max_` is needed to determine the expansion of '*'. 235 236 """ 237 if isinstance(cronspec, int): 238 result = set([cronspec]) 239 elif isinstance(cronspec, basestring): 240 result = crontab_parser(max_).parse(cronspec) 241 elif isinstance(cronspec, set): 242 result = cronspec 243 elif is_iterable(cronspec): 244 result = set(cronspec) 245 else: 246 raise TypeError( 247 "Argument cronspec needs to be of any of the " 248 "following types: int, basestring, or an iterable type. " 249 "'%s' was given." % type(cronspec)) 250 251 # assure the result does not exceed the max 252 for number in result: 253 if number >= max_: 254 raise ValueError( 255 "Invalid crontab pattern. Valid " 256 "range is 0-%d. '%d' was found." % (max_ - 1, number)) 257 258 return result 259 260 def __init__(self, minute='*', hour='*', day_of_week='*', 261 nowfun=datetime.now): 262 self._orig_minute = minute 263 self._orig_hour = hour 264 self._orig_day_of_week = day_of_week 265 self.hour = self._expand_cronspec(hour, 24) 266 self.minute = self._expand_cronspec(minute, 60) 267 self.day_of_week = self._expand_cronspec(day_of_week, 7) 268 self.nowfun = nowfun 269 270 def __repr__(self): 271 return "<crontab: %s %s %s (m/h/d)>" % (self._orig_minute or "*", 272 self._orig_hour or "*", 273 self._orig_day_of_week or "*") 274 275 def __reduce__(self): 276 return (self.__class__, (self._orig_minute, 277 self._orig_hour, 278 self._orig_day_of_week), None) 279 280 def remaining_estimate(self, last_run_at): 281 """Returns when the periodic task should run next as a timedelta.""" 282 weekday = last_run_at.isoweekday() 283 weekday = 0 if weekday == 7 else weekday # Sunday is day 0, not day 7. 
284 285 execute_this_hour = (weekday in self.day_of_week and 286 last_run_at.hour in self.hour and 287 last_run_at.minute < max(self.minute)) 288 289 if execute_this_hour: 290 next_minute = min(minute for minute in self.minute 291 if minute > last_run_at.minute) 292 delta = relativedelta(minute=next_minute, 293 second=0, 294 microsecond=0) 295 else: 296 next_minute = min(self.minute) 297 execute_today = (weekday in self.day_of_week and 298 last_run_at.hour < max(self.hour)) 299 300 if execute_today: 301 next_hour = min(hour for hour in self.hour 302 if hour > last_run_at.hour) 303 delta = relativedelta(hour=next_hour, 304 minute=next_minute, 305 second=0, 306 microsecond=0) 307 else: 308 next_hour = min(self.hour) 309 next_day = min([day for day in self.day_of_week 310 if day > weekday] or 311 self.day_of_week) 312 add_week = next_day == weekday 313 314 delta = relativedelta(weeks=add_week and 1 or 0, 315 weekday=(next_day - 1) % 7, 316 hour=next_hour, 317 minute=next_minute, 318 second=0, 319 microsecond=0) 320 321 return remaining(last_run_at, delta, now=self.nowfun()) 322 323 def is_due(self, last_run_at): 324 """Returns tuple of two items `(is_due, next_time_to_run)`, 325 where next time to run is in seconds. 326 327 See :meth:`celery.schedules.schedule.is_due` for more information. 
328 329 """ 330 rem_delta = self.remaining_estimate(last_run_at) 331 rem = timedelta_seconds(rem_delta) 332 due = rem == 0 333 if due: 334 rem_delta = self.remaining_estimate(last_run_at=self.nowfun()) 335 rem = timedelta_seconds(rem_delta) 336 return due, rem 337 338 def __eq__(self, other): 339 if isinstance(other, crontab): 340 return (other.day_of_week == self.day_of_week and 341 other.hour == self.hour and 342 other.minute == self.minute) 343 return other is self 344 345 346 def maybe_schedule(s, relative=False): 347 if isinstance(s, int): 348 s = timedelta(seconds=s) 349 if isinstance(s, timedelta): 350 return schedule(s, relative) 351 return s 352 [end of celery/schedules.py] [start of celery/task/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.task 4 ~~~~~~~~~~~ 5 6 Creating tasks, subtasks, sets and chords. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 import warnings 15 16 from ..app import app_or_default 17 from ..exceptions import CDeprecationWarning 18 19 from .base import Task, PeriodicTask # noqa 20 from .sets import TaskSet, subtask # noqa 21 from .chords import chord # noqa 22 from .control import discard_all # noqa 23 24 25 def task(*args, **kwargs): 26 """Decorator to create a task class out of any callable. 27 28 **Examples** 29 30 .. code-block:: python 31 32 @task 33 def refresh_feed(url): 34 return Feed.objects.get(url=url).refresh() 35 36 With setting extra options and using retry. 37 38 .. 
code-block:: python 39 40 @task(max_retries=10) 41 def refresh_feed(url): 42 try: 43 return Feed.objects.get(url=url).refresh() 44 except socket.error, exc: 45 refresh_feed.retry(exc=exc) 46 47 Calling the resulting task: 48 49 >>> refresh_feed("http://example.com/rss") # Regular 50 <Feed: http://example.com/rss> 51 >>> refresh_feed.delay("http://example.com/rss") # Async 52 <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d> 53 """ 54 kwargs.setdefault("accept_magic_kwargs", False) 55 return app_or_default().task(*args, **kwargs) 56 57 58 def periodic_task(*args, **options): 59 """Decorator to create a task class out of any callable. 60 61 .. admonition:: Examples 62 63 .. code-block:: python 64 65 @task 66 def refresh_feed(url): 67 return Feed.objects.get(url=url).refresh() 68 69 With setting extra options and using retry. 70 71 .. code-block:: python 72 73 @task(exchange="feeds") 74 def refresh_feed(url, **kwargs): 75 try: 76 return Feed.objects.get(url=url).refresh() 77 except socket.error, exc: 78 refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc) 79 80 Calling the resulting task: 81 82 >>> refresh_feed("http://example.com/rss") # Regular 83 <Feed: http://example.com/rss> 84 >>> refresh_feed.delay("http://example.com/rss") # Async 85 <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d> 86 87 """ 88 return task(**dict({"base": PeriodicTask}, **options)) 89 90 91 @task(name="celery.backend_cleanup") 92 def backend_cleanup(): 93 backend_cleanup.backend.cleanup() 94 95 96 class PingTask(Task): # ✞ 97 name = "celery.ping" 98 99 def run(self, **kwargs): 100 return "pong" 101 102 103 def ping(): # ✞ 104 """Deprecated and scheduled for removal in Celery 2.3. 105 106 Please use :meth:`celery.task.control.ping` instead. 107 108 """ 109 warnings.warn(CDeprecationWarning( 110 "The ping task has been deprecated and will be removed in Celery " 111 "v2.3. 
Please use inspect.ping instead.")) 112 return PingTask.apply_async().get() 113 [end of celery/task/__init__.py] [start of celery/task/chords.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.task.chords 4 ~~~~~~~~~~~~~~~~~~ 5 6 Chords (task set callbacks). 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 from .. import current_app 15 from ..result import TaskSetResult 16 from ..utils import uuid 17 18 from .sets import TaskSet, subtask 19 20 21 @current_app.task(name="celery.chord_unlock", max_retries=None) 22 def _unlock_chord(setid, callback, interval=1, propagate=False, 23 max_retries=None): 24 result = TaskSetResult.restore(setid) 25 if result.ready(): 26 subtask(callback).delay(result.join(propagate=propagate)) 27 result.delete() 28 else: 29 _unlock_chord.retry(countdown=interval, max_retries=max_retries) 30 31 32 class Chord(current_app.Task): 33 accept_magic_kwargs = False 34 name = "celery.chord" 35 36 def run(self, set, body, interval=1, max_retries=None, 37 propagate=False, **kwargs): 38 if not isinstance(set, TaskSet): 39 set = TaskSet(set) 40 r = [] 41 setid = uuid() 42 for task in set.tasks: 43 tid = uuid() 44 task.options.update(task_id=tid, chord=body) 45 r.append(current_app.AsyncResult(tid)) 46 current_app.TaskSetResult(setid, r).save() 47 self.backend.on_chord_apply(setid, body, interval, 48 max_retries=max_retries, 49 propagate=propagate) 50 return set.apply_async(taskset_id=setid) 51 52 53 class chord(object): 54 Chord = Chord 55 56 def __init__(self, tasks, **options): 57 self.tasks = tasks 58 self.options = options 59 60 def __call__(self, body, **options): 61 tid = body.options.setdefault("task_id", uuid()) 62 self.Chord.apply_async((list(self.tasks), body), self.options, 63 **options) 64 return body.type.app.AsyncResult(tid) 65 [end of celery/task/chords.py] [start of celery/utils/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.utils 4 
~~~~~~~~~~~~ 5 6 Utility functions. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 from __future__ import with_statement 14 15 import os 16 import sys 17 import operator 18 import imp as _imp 19 import importlib 20 import logging 21 import threading 22 import traceback 23 import warnings 24 25 from contextlib import contextmanager 26 from functools import partial, wraps 27 from inspect import getargspec 28 from itertools import islice 29 from pprint import pprint 30 31 from kombu.utils import cached_property, gen_unique_id # noqa 32 uuid = gen_unique_id 33 34 from ..exceptions import CPendingDeprecationWarning, CDeprecationWarning 35 36 from .compat import StringIO 37 from .encoding import safe_repr as _safe_repr 38 39 LOG_LEVELS = dict(logging._levelNames) 40 LOG_LEVELS["FATAL"] = logging.FATAL 41 LOG_LEVELS[logging.FATAL] = "FATAL" 42 43 PENDING_DEPRECATION_FMT = """ 44 %(description)s is scheduled for deprecation in \ 45 version %(deprecation)s and removal in version v%(removal)s. \ 46 %(alternative)s 47 """ 48 49 DEPRECATION_FMT = """ 50 %(description)s is deprecated and scheduled for removal in 51 version %(removal)s. 
%(alternative)s 52 """ 53 54 55 def warn_deprecated(description=None, deprecation=None, removal=None, 56 alternative=None): 57 ctx = {"description": description, 58 "deprecation": deprecation, "removal": removal, 59 "alternative": alternative} 60 if deprecation is not None: 61 w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT % ctx) 62 else: 63 w = CDeprecationWarning(DEPRECATION_FMT % ctx) 64 warnings.warn(w) 65 66 67 def deprecated(description=None, deprecation=None, removal=None, 68 alternative=None): 69 70 def _inner(fun): 71 72 @wraps(fun) 73 def __inner(*args, **kwargs): 74 warn_deprecated(description=description or get_full_cls_name(fun), 75 deprecation=deprecation, 76 removal=removal, 77 alternative=alternative) 78 return fun(*args, **kwargs) 79 return __inner 80 return _inner 81 82 83 def lpmerge(L, R): 84 """Left precedent dictionary merge. Keeps values from `l`, if the value 85 in `r` is :const:`None`.""" 86 return dict(L, **dict((k, v) for k, v in R.iteritems() if v is not None)) 87 88 89 class promise(object): 90 """A promise. 91 92 Evaluated when called or if the :meth:`evaluate` method is called. 93 The function is evaluated on every access, so the value is not 94 memoized (see :class:`mpromise`). 95 96 Overloaded operations that will evaluate the promise: 97 :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`. 
98 99 """ 100 101 def __init__(self, fun, *args, **kwargs): 102 self._fun = fun 103 self._args = args 104 self._kwargs = kwargs 105 106 def __call__(self): 107 return self.evaluate() 108 109 def evaluate(self): 110 return self._fun(*self._args, **self._kwargs) 111 112 def __str__(self): 113 return str(self()) 114 115 def __repr__(self): 116 return repr(self()) 117 118 def __cmp__(self, rhs): 119 if isinstance(rhs, self.__class__): 120 return -cmp(rhs, self()) 121 return cmp(self(), rhs) 122 123 def __eq__(self, rhs): 124 return self() == rhs 125 126 def __deepcopy__(self, memo): 127 memo[id(self)] = self 128 return self 129 130 def __reduce__(self): 131 return (self.__class__, (self._fun, ), {"_args": self._args, 132 "_kwargs": self._kwargs}) 133 134 135 class mpromise(promise): 136 """Memoized promise. 137 138 The function is only evaluated once, every subsequent access 139 will return the same value. 140 141 .. attribute:: evaluated 142 143 Set to to :const:`True` after the promise has been evaluated. 144 145 """ 146 evaluated = False 147 _value = None 148 149 def evaluate(self): 150 if not self.evaluated: 151 self._value = super(mpromise, self).evaluate() 152 self.evaluated = True 153 return self._value 154 155 156 def maybe_promise(value): 157 """Evaluates if the value is a promise.""" 158 if isinstance(value, promise): 159 return value.evaluate() 160 return value 161 162 163 def noop(*args, **kwargs): 164 """No operation. 165 166 Takes any arguments/keyword arguments and does nothing. 167 168 """ 169 pass 170 171 172 if sys.version_info >= (3, 0): 173 174 def kwdict(kwargs): 175 return kwargs 176 else: 177 def kwdict(kwargs): # noqa 178 """Make sure keyword arguments are not in unicode. 179 180 This should be fixed in newer Python versions, 181 see: http://bugs.python.org/issue4978. 
182 183 """ 184 return dict((key.encode("utf-8"), value) 185 for key, value in kwargs.items()) 186 187 188 def first(predicate, iterable): 189 """Returns the first element in `iterable` that `predicate` returns a 190 :const:`True` value for.""" 191 for item in iterable: 192 if predicate(item): 193 return item 194 195 196 def firstmethod(method): 197 """Returns a functions that with a list of instances, 198 finds the first instance that returns a value for the given method. 199 200 The list can also contain promises (:class:`promise`.) 201 202 """ 203 204 def _matcher(seq, *args, **kwargs): 205 for cls in seq: 206 try: 207 answer = getattr(maybe_promise(cls), method)(*args, **kwargs) 208 if answer is not None: 209 return answer 210 except AttributeError: 211 pass 212 return _matcher 213 214 215 def chunks(it, n): 216 """Split an iterator into chunks with `n` elements each. 217 218 Examples 219 220 # n == 2 221 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) 222 >>> list(x) 223 [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] 224 225 # n == 3 226 >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) 227 >>> list(x) 228 [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] 229 230 """ 231 for first in it: 232 yield [first] + list(islice(it, n - 1)) 233 234 235 def padlist(container, size, default=None): 236 """Pad list with default elements. 
237 238 Examples: 239 240 >>> first, last, city = padlist(["George", "Costanza", "NYC"], 3) 241 ("George", "Costanza", "NYC") 242 >>> first, last, city = padlist(["George", "Costanza"], 3) 243 ("George", "Costanza", None) 244 >>> first, last, city, planet = padlist(["George", "Costanza", 245 "NYC"], 4, default="Earth") 246 ("George", "Costanza", "NYC", "Earth") 247 248 """ 249 return list(container)[:size] + [default] * (size - len(container)) 250 251 252 def is_iterable(obj): 253 try: 254 iter(obj) 255 except TypeError: 256 return False 257 return True 258 259 260 def mattrgetter(*attrs): 261 """Like :func:`operator.itemgetter` but returns :const:`None` on missing 262 attributes instead of raising :exc:`AttributeError`.""" 263 return lambda obj: dict((attr, getattr(obj, attr, None)) 264 for attr in attrs) 265 266 267 def get_full_cls_name(cls): 268 """With a class, get its full module and class name.""" 269 return ".".join([cls.__module__, 270 cls.__name__]) 271 272 273 def fun_takes_kwargs(fun, kwlist=[]): 274 """With a function, and a list of keyword arguments, returns arguments 275 in the list which the function takes. 276 277 If the object has an `argspec` attribute that is used instead 278 of using the :meth:`inspect.getargspec` introspection. 279 280 :param fun: The function to inspect arguments of. 281 :param kwlist: The list of keyword arguments. 282 283 Examples 284 285 >>> def foo(self, x, y, logfile=None, loglevel=None): 286 ... 
return x * y 287 >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"]) 288 ["logfile", "loglevel"] 289 290 >>> def foo(self, x, y, **kwargs): 291 >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"]) 292 ["logfile", "loglevel", "task_id"] 293 294 """ 295 argspec = getattr(fun, "argspec", getargspec(fun)) 296 args, _varargs, keywords, _defaults = argspec 297 if keywords != None: 298 return kwlist 299 return filter(partial(operator.contains, args), kwlist) 300 301 302 def get_cls_by_name(name, aliases={}, imp=None, package=None, **kwargs): 303 """Get class by name. 304 305 The name should be the full dot-separated path to the class:: 306 307 modulename.ClassName 308 309 Example:: 310 311 celery.concurrency.processes.TaskPool 312 ^- class name 313 314 If `aliases` is provided, a dict containing short name/long name 315 mappings, the name is looked up in the aliases first. 316 317 Examples: 318 319 >>> get_cls_by_name("celery.concurrency.processes.TaskPool") 320 <class 'celery.concurrency.processes.TaskPool'> 321 322 >>> get_cls_by_name("default", { 323 ... "default": "celery.concurrency.processes.TaskPool"}) 324 <class 'celery.concurrency.processes.TaskPool'> 325 326 # Does not try to look up non-string names. 327 >>> from celery.concurrency.processes import TaskPool 328 >>> get_cls_by_name(TaskPool) is TaskPool 329 True 330 331 """ 332 if imp is None: 333 imp = importlib.import_module 334 335 if not isinstance(name, basestring): 336 return name # already a class 337 338 name = aliases.get(name) or name 339 module_name, _, cls_name = name.rpartition(".") 340 if not module_name and package: 341 module_name = package 342 try: 343 module = imp(module_name, package=package, **kwargs) 344 except ValueError, exc: 345 raise ValueError("Couldn't import %r: %s" % (name, exc)) 346 return getattr(module, cls_name) 347 348 get_symbol_by_name = get_cls_by_name 349 350 351 def instantiate(name, *args, **kwargs): 352 """Instantiate class by name. 
353 354 See :func:`get_cls_by_name`. 355 356 """ 357 return get_cls_by_name(name)(*args, **kwargs) 358 359 360 def truncate_text(text, maxlen=128, suffix="..."): 361 """Truncates text to a maximum number of characters.""" 362 if len(text) >= maxlen: 363 return text[:maxlen].rsplit(" ", 1)[0] + suffix 364 return text 365 366 367 def abbr(S, max, ellipsis="..."): 368 if S is None: 369 return "???" 370 if len(S) > max: 371 return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max] 372 return S 373 374 375 def abbrtask(S, max): 376 if S is None: 377 return "???" 378 if len(S) > max: 379 module, _, cls = S.rpartition(".") 380 module = abbr(module, max - len(cls) - 3, False) 381 return module + "[.]" + cls 382 return S 383 384 385 def isatty(fh): 386 # Fixes bug with mod_wsgi: 387 # mod_wsgi.Log object has no attribute isatty. 388 return getattr(fh, "isatty", None) and fh.isatty() 389 390 391 def textindent(t, indent=0): 392 """Indent text.""" 393 return "\n".join(" " * indent + p for p in t.split("\n")) 394 395 396 @contextmanager 397 def cwd_in_path(): 398 cwd = os.getcwd() 399 if cwd in sys.path: 400 yield 401 else: 402 sys.path.insert(0, cwd) 403 try: 404 yield cwd 405 finally: 406 try: 407 sys.path.remove(cwd) 408 except ValueError: 409 pass 410 411 412 def find_module(module, path=None, imp=None): 413 """Version of :func:`imp.find_module` supporting dots.""" 414 if imp is None: 415 imp = importlib.import_module 416 with cwd_in_path(): 417 if "." in module: 418 last = None 419 parts = module.split(".") 420 for i, part in enumerate(parts[:-1]): 421 path = imp(".".join(parts[:i + 1])).__path__ 422 last = _imp.find_module(parts[i + 1], path) 423 return last 424 return _imp.find_module(module) 425 426 427 def import_from_cwd(module, imp=None, package=None): 428 """Import module, but make sure it finds modules 429 located in the current directory. 430 431 Modules located in the current directory has 432 precedence over modules located in `sys.path`. 
433 """ 434 if imp is None: 435 imp = importlib.import_module 436 with cwd_in_path(): 437 return imp(module, package=package) 438 439 440 def cry(): # pragma: no cover 441 """Return stacktrace of all active threads. 442 443 From https://gist.github.com/737056 444 445 """ 446 tmap = {} 447 main_thread = None 448 # get a map of threads by their ID so we can print their names 449 # during the traceback dump 450 for t in threading.enumerate(): 451 if getattr(t, "ident", None): 452 tmap[t.ident] = t 453 else: 454 main_thread = t 455 456 out = StringIO() 457 sep = "=" * 49 + "\n" 458 for tid, frame in sys._current_frames().iteritems(): 459 thread = tmap.get(tid, main_thread) 460 if not thread: 461 # skip old junk (left-overs from a fork) 462 continue 463 out.write("%s\n" % (thread.getName(), )) 464 out.write(sep) 465 traceback.print_stack(frame, file=out) 466 out.write(sep) 467 out.write("LOCAL VARIABLES\n") 468 out.write(sep) 469 pprint(frame.f_locals, stream=out) 470 out.write("\n\n") 471 return out.getvalue() 472 473 474 def reprkwargs(kwargs, sep=', ', fmt="%s=%s"): 475 return sep.join(fmt % (k, _safe_repr(v)) for k, v in kwargs.iteritems()) 476 477 478 def reprcall(name, args=(), kwargs=(), sep=', '): 479 return "%s(%s%s%s)" % (name, sep.join(map(_safe_repr, args)), 480 (args and kwargs) and sep or "", 481 reprkwargs(kwargs, sep)) 482 [end of celery/utils/__init__.py] [start of celery/utils/encoding.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.utils.encoding 4 ~~~~~~~~~~~~~~~~~~~~~ 5 6 Utilities to encode text, and to safely emit text from running 7 applications without crashing with the infamous :exc:`UnicodeDecodeError` 8 exception. 9 10 :copyright: (c) 2009 - 2011 by Ask Solem. 11 :license: BSD, see LICENSE for more details. 
12 13 """ 14 from __future__ import absolute_import 15 16 import sys 17 import traceback 18 19 is_py3k = sys.version_info >= (3, 0) 20 21 22 if is_py3k: 23 24 def str_to_bytes(s): 25 if isinstance(s, str): 26 return s.encode() 27 return s 28 29 def bytes_to_str(s): 30 if isinstance(s, bytes): 31 return s.decode() 32 return s 33 34 def from_utf8(s, *args, **kwargs): 35 return s 36 37 def ensure_bytes(s): 38 if not isinstance(s, bytes): 39 return str_to_bytes(s) 40 return s 41 42 str_t = str 43 bytes_t = bytes 44 45 else: 46 47 def str_to_bytes(s): # noqa 48 if isinstance(s, unicode): 49 return s.encode() 50 return s 51 52 def bytes_to_str(s): # noqa 53 return s 54 55 def from_utf8(s, *args, **kwargs): # noqa 56 return s.encode("utf-8", *args, **kwargs) 57 58 str_t = unicode 59 bytes_t = str 60 ensure_bytes = str_to_bytes 61 62 63 if sys.platform.startswith("java"): 64 65 def default_encoding(): 66 return "utf-8" 67 else: 68 69 def default_encoding(): # noqa 70 return sys.getfilesystemencoding() 71 72 73 def safe_str(s, errors="replace"): 74 s = bytes_to_str(s) 75 if not isinstance(s, basestring): 76 return safe_repr(s, errors) 77 return _safe_str(s, errors) 78 79 80 def _safe_str(s, errors="replace"): 81 if is_py3k: 82 return s 83 encoding = default_encoding() 84 try: 85 if isinstance(s, unicode): 86 return s.encode(encoding, errors) 87 return unicode(s, encoding, errors) 88 except Exception, exc: 89 return "<Unrepresentable %r: %r %r>" % ( 90 type(s), exc, "\n".join(traceback.format_stack())) 91 92 93 def safe_repr(o, errors="replace"): 94 try: 95 return repr(o) 96 except Exception: 97 return _safe_str(o, errors) 98 [end of celery/utils/encoding.py] [start of celery/utils/timer2.py] 1 # -*- coding: utf-8 -*- 2 """ 3 timer2 4 ~~~~~~ 5 6 Scheduler for Python functions. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 
10 11 """ 12 from __future__ import absolute_import 13 from __future__ import with_statement 14 15 import atexit 16 import heapq 17 import logging 18 import os 19 import sys 20 import traceback 21 import warnings 22 23 from itertools import count 24 from threading import Condition, Event, Lock, Thread 25 from time import time, sleep, mktime 26 27 from datetime import datetime, timedelta 28 29 VERSION = (1, 0, 0) 30 __version__ = ".".join(map(str, VERSION)) 31 __author__ = "Ask Solem" 32 __contact__ = "ask@celeryproject.org" 33 __homepage__ = "http://github.com/ask/timer2/" 34 __docformat__ = "restructuredtext" 35 36 DEFAULT_MAX_INTERVAL = 2 37 38 39 class TimedFunctionFailed(UserWarning): 40 pass 41 42 43 class Entry(object): 44 cancelled = False 45 46 def __init__(self, fun, args=None, kwargs=None): 47 self.fun = fun 48 self.args = args or [] 49 self.kwargs = kwargs or {} 50 self.tref = self 51 52 def __call__(self): 53 return self.fun(*self.args, **self.kwargs) 54 55 def cancel(self): 56 self.tref.cancelled = True 57 58 def __repr__(self): 59 return "<TimerEntry: %s(*%r, **%r)" % ( 60 self.fun.__name__, self.args, self.kwargs) 61 62 if sys.version_info >= (3, 0): 63 64 def __hash__(self): 65 return hash("|".join(map(repr, (self.fun, self.args, 66 self.kwargs)))) 67 68 def __lt__(self, other): 69 return hash(self) < hash(other) 70 71 def __gt__(self, other): 72 return hash(self) > hash(other) 73 74 def __eq__(self, other): 75 return hash(self) == hash(other) 76 77 78 def to_timestamp(d): 79 if isinstance(d, datetime): 80 return mktime(d.timetuple()) 81 return d 82 83 84 class Schedule(object): 85 """ETA scheduler.""" 86 on_error = None 87 88 def __init__(self, max_interval=DEFAULT_MAX_INTERVAL, on_error=None): 89 self.max_interval = float(max_interval) 90 self.on_error = on_error or self.on_error 91 self._queue = [] 92 93 def handle_error(self, exc_info): 94 if self.on_error: 95 self.on_error(exc_info) 96 return True 97 98 def enter(self, entry, eta=None, 
priority=0): 99 """Enter function into the scheduler. 100 101 :param entry: Item to enter. 102 :keyword eta: Scheduled time as a :class:`datetime.datetime` object. 103 :keyword priority: Unused. 104 105 """ 106 if eta is None: # schedule now 107 eta = datetime.now() 108 109 try: 110 eta = to_timestamp(eta) 111 except OverflowError: 112 if not self.handle_error(sys.exc_info()): 113 raise 114 115 heapq.heappush(self._queue, (eta, priority, entry)) 116 return entry 117 118 def __iter__(self): 119 """The iterator yields the time to sleep for between runs.""" 120 121 # localize variable access 122 nowfun = time 123 pop = heapq.heappop 124 max_interval = self.max_interval 125 queue = self._queue 126 127 while 1: 128 if queue: 129 eta, priority, entry = verify = queue[0] 130 now = nowfun() 131 132 if now < eta: 133 yield min(eta - now, max_interval), None 134 else: 135 event = pop(queue) 136 137 if event is verify: 138 if not entry.cancelled: 139 yield None, entry 140 continue 141 else: 142 heapq.heappush(queue, event) 143 yield None, None 144 145 def empty(self): 146 """Is the schedule empty?""" 147 return not self._queue 148 149 def clear(self): 150 self._queue[:] = [] # used because we can't replace the object 151 # and the operation is atomic. 
152 153 def info(self): 154 return ({"eta": eta, "priority": priority, "item": item} 155 for eta, priority, item in self.queue) 156 157 @property 158 def queue(self): 159 events = list(self._queue) 160 return map(heapq.heappop, [events] * len(events)) 161 162 163 class Timer(Thread): 164 Entry = Entry 165 Schedule = Schedule 166 167 running = False 168 on_tick = None 169 _timer_count = count(1).next 170 171 def __init__(self, schedule=None, on_error=None, on_tick=None, **kwargs): 172 self.schedule = schedule or self.Schedule(on_error=on_error) 173 self.on_tick = on_tick or self.on_tick 174 175 Thread.__init__(self) 176 self._is_shutdown = Event() 177 self._is_stopped = Event() 178 self.mutex = Lock() 179 self.logger = logging.getLogger("timer2.Timer") 180 self.not_empty = Condition(self.mutex) 181 self.setDaemon(True) 182 self.setName("Timer-%s" % (self._timer_count(), )) 183 184 def apply_entry(self, entry): 185 try: 186 entry() 187 except Exception, exc: 188 typ, val, tb = einfo = sys.exc_info() 189 if not self.schedule.handle_error(einfo): 190 warnings.warn(TimedFunctionFailed(repr(exc))), 191 traceback.print_exception(typ, val, tb) 192 193 def _next_entry(self): 194 with self.not_empty: 195 delay, entry = self.scheduler.next() 196 if entry is None: 197 if delay is None: 198 self.not_empty.wait(1.0) 199 return delay 200 return self.apply_entry(entry) 201 __next__ = next = _next_entry # for 2to3 202 203 def run(self): 204 try: 205 self.running = True 206 self.scheduler = iter(self.schedule) 207 208 while not self._is_shutdown.isSet(): 209 delay = self._next_entry() 210 if delay: 211 if self.on_tick: 212 self.on_tick(delay) 213 if sleep is None: # pragma: no cover 214 break 215 sleep(delay) 216 try: 217 self._is_stopped.set() 218 except TypeError: # pragma: no cover 219 # we lost the race at interpreter shutdown, 220 # so gc collected built-in modules. 
221 pass 222 except Exception, exc: 223 self.logger.error("Thread Timer crashed: %r", exc, 224 exc_info=sys.exc_info()) 225 os._exit(1) 226 227 def stop(self): 228 if self.running: 229 self._is_shutdown.set() 230 self._is_stopped.wait() 231 self.join(1e10) 232 self.running = False 233 234 def ensure_started(self): 235 if not self.running and not self.isAlive(): 236 self.start() 237 238 def enter(self, entry, eta, priority=None): 239 self.ensure_started() 240 with self.mutex: 241 entry = self.schedule.enter(entry, eta, priority) 242 self.not_empty.notify() 243 return entry 244 245 def apply_at(self, eta, fun, args=(), kwargs={}, priority=0): 246 return self.enter(self.Entry(fun, args, kwargs), eta, priority) 247 248 def enter_after(self, msecs, entry, priority=0): 249 eta = datetime.now() + timedelta(seconds=msecs / 1000.0) 250 return self.enter(entry, eta, priority) 251 252 def apply_after(self, msecs, fun, args=(), kwargs={}, priority=0): 253 return self.enter_after(msecs, Entry(fun, args, kwargs), priority) 254 255 def apply_interval(self, msecs, fun, args=(), kwargs={}, priority=0): 256 tref = Entry(fun, args, kwargs) 257 258 def _reschedules(*args, **kwargs): 259 try: 260 return fun(*args, **kwargs) 261 finally: 262 if not tref.cancelled: 263 self.enter_after(msecs, tref, priority) 264 265 tref.fun = _reschedules 266 return self.enter_after(msecs, tref, priority) 267 268 def exit_after(self, msecs, priority=10): 269 self.apply_after(msecs, sys.exit, priority) 270 271 def cancel(self, tref): 272 tref.cancel() 273 274 def clear(self): 275 self.schedule.clear() 276 277 def empty(self): 278 return self.schedule.empty() 279 280 @property 281 def queue(self): 282 return self.schedule.queue 283 284 default_timer = _default_timer = Timer() 285 apply_after = _default_timer.apply_after 286 apply_at = _default_timer.apply_at 287 apply_interval = _default_timer.apply_interval 288 enter_after = _default_timer.enter_after 289 enter = _default_timer.enter 290 exit_after = 
_default_timer.exit_after 291 cancel = _default_timer.cancel 292 clear = _default_timer.clear 293 294 atexit.register(_default_timer.stop) 295 [end of celery/utils/timer2.py] [start of celery/utils/timeutils.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.utils.timeutils 4 ~~~~~~~~~~~~~~~~~~~~~~ 5 6 This module contains various utilities relating to dates and times. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 from datetime import datetime, timedelta 15 from dateutil.parser import parse as parse_iso8601 16 17 DAYNAMES = "sun", "mon", "tue", "wed", "thu", "fri", "sat" 18 WEEKDAYS = dict((name, dow) for name, dow in zip(DAYNAMES, range(7))) 19 20 RATE_MODIFIER_MAP = {"s": lambda n: n, 21 "m": lambda n: n / 60.0, 22 "h": lambda n: n / 60.0 / 60.0} 23 24 HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, "total_seconds") 25 26 TIME_UNITS = (("day", 60 * 60 * 24.0, lambda n: "%.2f" % n), 27 ("hour", 60 * 60.0, lambda n: "%.2f" % n), 28 ("minute", 60.0, lambda n: "%.2f" % n), 29 ("second", 1.0, lambda n: "%.2f" % n)) 30 31 32 def maybe_timedelta(delta): 33 """Coerces integer to timedelta if `delta` is an integer.""" 34 if isinstance(delta, (int, float)): 35 return timedelta(seconds=delta) 36 return delta 37 38 39 if HAVE_TIMEDELTA_TOTAL_SECONDS: # pragma: no cover 40 41 def timedelta_seconds(delta): 42 """Convert :class:`datetime.timedelta` to seconds. 43 44 Doesn't account for negative values. 45 46 """ 47 return max(delta.total_seconds(), 0) 48 else: # pragma: no cover 49 50 def timedelta_seconds(delta): # noqa 51 """Convert :class:`datetime.timedelta` to seconds. 52 53 Doesn't account for negative values. 54 55 """ 56 if delta.days < 0: 57 return 0 58 return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5) 59 60 61 def delta_resolution(dt, delta): 62 """Round a datetime to the resolution of a timedelta. 
63 64 If the timedelta is in days, the datetime will be rounded 65 to the nearest days, if the timedelta is in hours the datetime 66 will be rounded to the nearest hour, and so on until seconds 67 which will just return the original datetime. 68 69 """ 70 delta = timedelta_seconds(delta) 71 72 resolutions = ((3, lambda x: x / 86400), 73 (4, lambda x: x / 3600), 74 (5, lambda x: x / 60)) 75 76 args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second 77 for res, predicate in resolutions: 78 if predicate(delta) >= 1.0: 79 return datetime(*args[:res]) 80 return dt 81 82 83 def remaining(start, ends_in, now=None, relative=False): 84 """Calculate the remaining time for a start date and a timedelta. 85 86 e.g. "how many seconds left for 30 seconds after start?" 87 88 :param start: Start :class:`~datetime.datetime`. 89 :param ends_in: The end delta as a :class:`~datetime.timedelta`. 90 :keyword relative: If enabled the end time will be 91 calculated using :func:`delta_resolution` (i.e. rounded to the 92 resolution of `ends_in`). 93 :keyword now: Function returning the current time and date, 94 defaults to :func:`datetime.now`. 95 96 """ 97 now = now or datetime.now() 98 99 end_date = start + ends_in 100 if relative: 101 end_date = delta_resolution(end_date, ends_in) 102 return end_date - now 103 104 105 def rate(rate): 106 """Parses rate strings, such as `"100/m"` or `"2/h"` 107 and converts them to seconds.""" 108 if rate: 109 if isinstance(rate, basestring): 110 ops, _, modifier = rate.partition("/") 111 return RATE_MODIFIER_MAP[modifier or "s"](int(ops)) or 0 112 return rate or 0 113 return 0 114 115 116 def weekday(name): 117 """Return the position of a weekday (0 - 7, where 0 is Sunday). 118 119 Example:: 120 121 >>> weekday("sunday"), weekday("sun"), weekday("mon") 122 (0, 0, 1) 123 124 """ 125 abbreviation = name[0:3].lower() 126 try: 127 return WEEKDAYS[abbreviation] 128 except KeyError: 129 # Show original day name in exception, instead of abbr. 
130 raise KeyError(name) 131 132 133 def humanize_seconds(secs, prefix=""): 134 """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2 135 hours".""" 136 secs = float(secs) 137 for unit, divider, formatter in TIME_UNITS: 138 if secs >= divider: 139 w = secs / divider 140 punit = w > 1 and (unit + "s") or unit 141 return "%s%s %s" % (prefix, formatter(w), punit) 142 return "now" 143 144 145 def maybe_iso8601(dt): 146 """`Either datetime | str -> datetime or None -> None`""" 147 if not dt: 148 return 149 if isinstance(dt, datetime): 150 return dt 151 return parse_iso8601(dt) 152 [end of celery/utils/timeutils.py] [start of celery/worker/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker 4 ~~~~~~~~~~~~~ 5 6 The worker. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 9 :license: BSD, see LICENSE for more details. 10 11 """ 12 from __future__ import absolute_import 13 14 import atexit 15 import logging 16 import socket 17 import sys 18 import threading 19 import traceback 20 21 from kombu.utils.finalize import Finalize 22 23 from .. import beat 24 from .. import concurrency as _concurrency 25 from .. import registry, platforms, signals 26 from ..app import app_or_default 27 from ..exceptions import SystemTerminate 28 from ..log import SilenceRepeated 29 from ..utils import noop, instantiate 30 31 from . import state 32 from .buckets import TaskBucket, FastQueue 33 34 RUN = 0x1 35 CLOSE = 0x2 36 TERMINATE = 0x3 37 38 #: List of signals to reset when a child process starts. 39 WORKER_SIGRESET = frozenset(["SIGTERM", 40 "SIGHUP", 41 "SIGTTIN", 42 "SIGTTOU", 43 "SIGUSR1"]) 44 45 #: List of signals to ignore when a child process starts. 46 WORKER_SIGIGNORE = frozenset(["SIGINT"]) 47 48 49 def process_initializer(app, hostname): 50 """Initializes the process so it can be used to process tasks. 51 52 Used for multiprocessing environments. 
53 54 """ 55 app = app_or_default(app) 56 app.set_current() 57 platforms.signals.reset(*WORKER_SIGRESET) 58 platforms.signals.ignore(*WORKER_SIGIGNORE) 59 platforms.set_mp_process_title("celeryd", hostname=hostname) 60 61 # This is for Windows and other platforms not supporting 62 # fork(). Note that init_worker makes sure it's only 63 # run once per process. 64 app.loader.init_worker() 65 app.loader.init_worker_process() 66 67 signals.worker_process_init.send(sender=None) 68 69 70 class WorkController(object): 71 """Unmanaged worker instance.""" 72 RUN = RUN 73 CLOSE = CLOSE 74 TERMINATE = TERMINATE 75 76 #: The number of simultaneous processes doing work (default: 77 #: :setting:`CELERYD_CONCURRENCY`) 78 concurrency = None 79 80 #: The loglevel used (default: :const:`logging.INFO`) 81 loglevel = logging.ERROR 82 83 #: The logfile used, if no logfile is specified it uses `stderr` 84 #: (default: :setting:`CELERYD_LOG_FILE`). 85 logfile = None 86 87 #: If :const:`True`, celerybeat is embedded, running in the main worker 88 #: process as a thread. 89 embed_clockservice = None 90 91 #: Enable the sending of monitoring events, these events can be captured 92 #: by monitors (celerymon). 93 send_events = False 94 95 #: The :class:`logging.Logger` instance used for logging. 96 logger = None 97 98 #: The pool instance used. 99 pool = None 100 101 #: The internal queue object that holds tasks ready for immediate 102 #: processing. 103 ready_queue = None 104 105 #: Instance of :class:`celery.worker.mediator.Mediator`. 106 mediator = None 107 108 #: Consumer instance. 
    consumer = None

    # Internal lifecycle state (RUN/CLOSE/TERMINATE) and the number of
    # components successfully started so far.
    _state = None
    _running = 0

    def __init__(self, concurrency=None, logfile=None, loglevel=None,
            send_events=None, hostname=None, ready_callback=noop,
            embed_clockservice=False, pool_cls=None, consumer_cls=None,
            mediator_cls=None, eta_scheduler_cls=None,
            schedule_filename=None, task_time_limit=None,
            task_soft_time_limit=None, max_tasks_per_child=None,
            pool_putlocks=None, db=None, prefetch_multiplier=None,
            eta_scheduler_precision=None, disable_rate_limits=None,
            autoscale=None, autoscaler_cls=None, scheduler_cls=None,
            app=None):

        self.app = app_or_default(app)
        conf = self.app.conf
        self._shutdown_complete = threading.Event()

        # Options -- every explicit argument falls back to the
        # corresponding app configuration setting.
        self.loglevel = loglevel or self.loglevel
        self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
        self.logfile = logfile or conf.CELERYD_LOG_FILE
        self.logger = self.app.log.get_default_logger()
        if send_events is None:
            send_events = conf.CELERY_SEND_EVENTS
        self.send_events = send_events
        self.pool_cls = _concurrency.get_implementation(
                            pool_cls or conf.CELERYD_POOL)
        self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
        self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
        self.eta_scheduler_cls = eta_scheduler_cls or \
                                    conf.CELERYD_ETA_SCHEDULER

        self.autoscaler_cls = autoscaler_cls or \
                                    conf.CELERYD_AUTOSCALER
        self.schedule_filename = schedule_filename or \
                                    conf.CELERYBEAT_SCHEDULE_FILENAME
        self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
        self.hostname = hostname or socket.gethostname()
        self.embed_clockservice = embed_clockservice
        self.ready_callback = ready_callback
        self.task_time_limit = task_time_limit or \
                                conf.CELERYD_TASK_TIME_LIMIT
        self.task_soft_time_limit = task_soft_time_limit or \
                                conf.CELERYD_TASK_SOFT_TIME_LIMIT
        self.max_tasks_per_child = max_tasks_per_child or \
                                conf.CELERYD_MAX_TASKS_PER_CHILD
        self.pool_putlocks = pool_putlocks or \
                                conf.CELERYD_POOL_PUTLOCKS
        self.eta_scheduler_precision = eta_scheduler_precision or \
                                conf.CELERYD_ETA_SCHEDULER_PRECISION
        self.prefetch_multiplier = prefetch_multiplier or \
                                conf.CELERYD_PREFETCH_MULTIPLIER
        self.timer_debug = SilenceRepeated(self.logger.debug,
                                           max_iterations=10)
        self.db = db or conf.CELERYD_STATE_DB
        self.disable_rate_limits = disable_rate_limits or \
                                conf.CELERY_DISABLE_RATE_LIMITS
        # Ensure stop() runs at interpreter exit / object finalization.
        self._finalize = Finalize(self, self.stop, exitpriority=1)
        self._finalize_db = None

        if self.db:
            self._persistence = state.Persistent(self.db)
            atexit.register(self._persistence.save)

        # Queues
        if not self.pool_cls.rlimit_safe:
            self.disable_rate_limits = True
        if self.disable_rate_limits:
            self.ready_queue = FastQueue()
            # With rate limits disabled, tasks go straight to the pool.
            self.ready_queue.put = self.process_task
        else:
            self.ready_queue = TaskBucket(task_registry=registry.tasks)

        self.logger.debug("Instantiating thread components...")

        # Threads + Pool + Consumer
        self.autoscaler = None
        max_concurrency = None
        min_concurrency = concurrency
        if autoscale:
            max_concurrency, min_concurrency = autoscale

        self.pool = instantiate(self.pool_cls, min_concurrency,
                                logger=self.logger,
                                initializer=process_initializer,
                                initargs=(self.app, self.hostname),
                                maxtasksperchild=self.max_tasks_per_child,
                                timeout=self.task_time_limit,
                                soft_timeout=self.task_soft_time_limit,
                                putlocks=self.pool_putlocks)
        self.priority_timer = instantiate(self.pool.Timer)

        if not self.eta_scheduler_cls:
            # Default Timer is set by the pool, as e.g. eventlet
            # needs a custom implementation.
            self.eta_scheduler_cls = self.pool.Timer

        self.autoscaler = None
        if autoscale:
            self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
                                          max_concurrency=max_concurrency,
                                          min_concurrency=min_concurrency,
                                          logger=self.logger)

        self.mediator = None
        if not self.disable_rate_limits:
            self.mediator = instantiate(self.mediator_cls, self.ready_queue,
                                        app=self.app,
                                        callback=self.process_task,
                                        logger=self.logger)

        self.scheduler = instantiate(self.eta_scheduler_cls,
                                precision=eta_scheduler_precision,
                                on_error=self.on_timer_error,
                                on_tick=self.on_timer_tick)

        self.beat = None
        if self.embed_clockservice:
            self.beat = beat.EmbeddedService(app=self.app,
                            logger=self.logger,
                            schedule_filename=self.schedule_filename,
                            scheduler_cls=self.scheduler_cls)

        prefetch_count = self.concurrency * self.prefetch_multiplier
        self.consumer = instantiate(self.consumer_cls,
                                    self.ready_queue,
                                    self.scheduler,
                                    logger=self.logger,
                                    hostname=self.hostname,
                                    send_events=self.send_events,
                                    init_callback=self.ready_callback,
                                    initial_prefetch_count=prefetch_count,
                                    pool=self.pool,
                                    priority_timer=self.priority_timer,
                                    app=self.app,
                                    controller=self)

        # The order is important here;
        #   the first in the list is the first to start,
        # and they must be stopped in reverse order.
        self.components = filter(None, (self.pool,
                                        self.mediator,
                                        self.scheduler,
                                        self.beat,
                                        self.autoscaler,
                                        self.consumer))

    def start(self):
        """Starts the workers main loop."""
        self._state = self.RUN

        try:
            for i, component in enumerate(self.components):
                self.logger.debug("Starting thread %s...",
                                  component.__class__.__name__)
                # Track how many components started, so _shutdown knows
                # whether start-up completed.
                self._running = i + 1
                component.start()
        except SystemTerminate:
            self.terminate()
        except Exception, exc:
            self.logger.error("Unrecoverable error: %r" % (exc, ),
                              exc_info=sys.exc_info())
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()

        # Will only get here if running green,
        # makes sure all greenthreads have exited.
        self._shutdown_complete.wait()

    def process_task(self, request):
        """Process task by sending it to the pool of workers."""
        try:
            request.task.execute(request, self.pool,
                                 self.loglevel, self.logfile)
        except Exception, exc:
            self.logger.critical("Internal error %s: %s\n%s",
                                 exc.__class__, exc, traceback.format_exc(),
                                 exc_info=True)
        # NOTE(review): this clause is only reachable if SystemTerminate is
        # not an Exception subclass (presumably it derives from
        # SystemExit) -- confirm against celery.exceptions.
        except SystemTerminate:
            self.terminate()
            raise
        except BaseException, exc:
            self.stop()
            raise exc

    def stop(self, in_sighandler=False):
        """Graceful shutdown of the worker server."""
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=True)

    def terminate(self, in_sighandler=False):
        """Not so graceful shutdown of the worker server."""
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=False)

    def _shutdown(self, warm=True):
        # warm=True stops components gracefully; warm=False terminates.
        what = (warm and "stopping" or "terminating").capitalize()

        if self._state in (self.CLOSE, self.TERMINATE):
            return

        if self._state != self.RUN or self._running != len(self.components):
            # Not fully started, can safely exit.
            self._state = self.TERMINATE
            self._shutdown_complete.set()
            return

        self._state = self.CLOSE
        signals.worker_shutdown.send(sender=self)

        # Components must be stopped in reverse start order.
        for component in reversed(self.components):
            self.logger.debug("%s thread %s...", what,
                              component.__class__.__name__)
            stop = component.stop
            if not warm:
                # Prefer terminate() if the component supports it.
                stop = getattr(component, "terminate", None) or stop
            stop()

        self.priority_timer.stop()
        self.consumer.close_connection()

        self._state = self.TERMINATE
        self._shutdown_complete.set()

    def on_timer_error(self, exc_info):
        """Callback invoked when the ETA scheduler raises an error."""
        _, exc, _ = exc_info
        self.logger.error("Timer error: %r", exc, exc_info=exc_info)

    def on_timer_tick(self, delay):
        """Callback invoked on every ETA scheduler tick (rate-limited log)."""
        self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay)
[end of celery/worker/__init__.py]
[start of celery/worker/autoscale.py]
# -*- coding: utf-8 -*-
"""
celery.worker.autoscale
~~~~~~~~~~~~~~~~~~~~~~~

This module implements the internal thread responsible
for growing and shrinking the pool according to the
current autoscale settings.

The autoscale thread is only enabled if autoscale
has been enabled on the command line.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import
from __future__ import with_statement

import os
import sys
import threading
import traceback

from time import sleep, time

from . import state
import state 28 29 30 class Autoscaler(threading.Thread): 31 32 def __init__(self, pool, max_concurrency, min_concurrency=0, 33 keepalive=30, logger=None): 34 threading.Thread.__init__(self) 35 self.pool = pool 36 self.mutex = threading.Lock() 37 self.max_concurrency = max_concurrency 38 self.min_concurrency = min_concurrency 39 self.keepalive = keepalive 40 self.logger = logger 41 self._last_action = None 42 self._is_shutdown = threading.Event() 43 self._is_stopped = threading.Event() 44 self.setDaemon(True) 45 self.setName(self.__class__.__name__) 46 47 assert self.keepalive, "can't scale down too fast." 48 49 def scale(self): 50 with self.mutex: 51 current = min(self.qty, self.max_concurrency) 52 if current > self.processes: 53 self.scale_up(current - self.processes) 54 elif current < self.processes: 55 self.scale_down( 56 (self.processes - current) - self.min_concurrency) 57 58 def update(self, max=None, min=None): 59 with self.mutex: 60 if max is not None: 61 if max < self.max_concurrency: 62 self._shrink(self.processes - max) 63 self.max_concurrency = max 64 if min is not None: 65 if min > self.min_concurrency: 66 self._grow(min - self.min_concurrency) 67 self.min_concurrency = min 68 return self.max_concurrency, self.min_concurrency 69 70 def force_scale_up(self, n): 71 with self.mutex: 72 new = self.processes + n 73 if new > self.max_concurrency: 74 self.max_concurrency = new 75 self.min_concurrency += 1 76 self._grow(n) 77 78 def force_scale_down(self, n): 79 with self.mutex: 80 new = self.processes - n 81 if new < self.min_concurrency: 82 self.min_concurrency = new 83 self._shrink(n) 84 85 def scale_up(self, n): 86 self._last_action = time() 87 return self._grow(n) 88 89 def _grow(self, n): 90 self.logger.info("Scaling up %s processes.", n) 91 self.pool.grow(n) 92 93 def _shrink(self, n): 94 self.logger.info("Scaling down %s processes.", n) 95 try: 96 self.pool.shrink(n) 97 except ValueError: 98 self.logger.debug( 99 "Autoscaler won't scale down: all 
processes busy.") 100 except Exception, exc: 101 self.logger.error("Autoscaler: scale_down: %r\n%r", 102 exc, traceback.format_stack(), 103 exc_info=sys.exc_info()) 104 105 def scale_down(self, n): 106 if not self._last_action or not n: 107 return 108 if time() - self._last_action > self.keepalive: 109 self._last_action = time() 110 self._shrink(n) 111 112 def run(self): 113 while not self._is_shutdown.isSet(): 114 try: 115 self.scale() 116 sleep(1.0) 117 except Exception, exc: 118 self.logger.error("Thread Autoscaler crashed: %r", exc, 119 exc_info=sys.exc_info()) 120 os._exit(1) 121 self._is_stopped.set() 122 123 def stop(self): 124 self._is_shutdown.set() 125 self._is_stopped.wait() 126 if self.isAlive(): 127 self.join(1e10) 128 129 def info(self): 130 return {"max": self.max_concurrency, 131 "min": self.min_concurrency, 132 "current": self.processes, 133 "qty": self.qty} 134 135 @property 136 def qty(self): 137 return len(state.reserved_requests) 138 139 @property 140 def processes(self): 141 return self.pool.num_processes 142 [end of celery/worker/autoscale.py] [start of celery/worker/buckets.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.buckets 4 ~~~~~~~~~~~~~~~~~~~~~ 5 6 This module implements the rate limiting of tasks, 7 by having a token bucket queue for each task type. 8 When a task is allowed to be processed it's moved 9 over the the ``ready_queue`` 10 11 The :mod:`celery.worker.mediator` is then responsible 12 for moving tasks from the ``ready_queue`` to the worker pool. 13 14 :copyright: (c) 2009 - 2011 by Ask Solem. 15 :license: BSD, see LICENSE for more details. 
16 17 """ 18 from __future__ import absolute_import 19 from __future__ import with_statement 20 21 import threading 22 23 from collections import deque 24 from time import time, sleep 25 from Queue import Queue, Empty 26 27 from ..datastructures import TokenBucket 28 from ..utils import timeutils 29 from ..utils.compat import zip_longest, chain_from_iterable 30 31 32 class RateLimitExceeded(Exception): 33 """The token buckets rate limit has been exceeded.""" 34 35 36 class TaskBucket(object): 37 """This is a collection of token buckets, each task type having 38 its own token bucket. If the task type doesn't have a rate limit, 39 it will have a plain :class:`~Queue.Queue` object instead of a 40 :class:`TokenBucketQueue`. 41 42 The :meth:`put` operation forwards the task to its appropriate bucket, 43 while the :meth:`get` operation iterates over the buckets and retrieves 44 the first available item. 45 46 Say we have three types of tasks in the registry: `celery.ping`, 47 `feed.refresh` and `video.compress`, the TaskBucket will consist 48 of the following items:: 49 50 {"celery.ping": TokenBucketQueue(fill_rate=300), 51 "feed.refresh": Queue(), 52 "video.compress": TokenBucketQueue(fill_rate=2)} 53 54 The get operation will iterate over these until one of the buckets 55 is able to return an item. The underlying datastructure is a `dict`, 56 so the order is ignored here. 57 58 :param task_registry: The task registry used to get the task 59 type class for a given task name. 
60 61 """ 62 63 def __init__(self, task_registry): 64 self.task_registry = task_registry 65 self.buckets = {} 66 self.init_with_registry() 67 self.immediate = deque() 68 self.mutex = threading.Lock() 69 self.not_empty = threading.Condition(self.mutex) 70 71 def put(self, request): 72 """Put a :class:`~celery.worker.job.TaskRequest` into 73 the appropiate bucket.""" 74 with self.mutex: 75 if request.task_name not in self.buckets: 76 self.add_bucket_for_type(request.task_name) 77 self.buckets[request.task_name].put_nowait(request) 78 self.not_empty.notify() 79 put_nowait = put 80 81 def _get_immediate(self): 82 try: 83 return self.immediate.popleft() 84 except IndexError: 85 raise Empty() 86 87 def _get(self): 88 # If the first bucket is always returning items, we would never 89 # get to fetch items from the other buckets. So we always iterate over 90 # all the buckets and put any ready items into a queue called 91 # "immediate". This queue is always checked for cached items first. 92 try: 93 return 0, self._get_immediate() 94 except Empty: 95 pass 96 97 remaining_times = [] 98 for bucket in self.buckets.values(): 99 remaining = bucket.expected_time() 100 if not remaining: 101 try: 102 # Just put any ready items into the immediate queue. 103 self.immediate.append(bucket.get_nowait()) 104 except Empty: 105 pass 106 except RateLimitExceeded: 107 remaining_times.append(bucket.expected_time()) 108 else: 109 remaining_times.append(remaining) 110 111 # Try the immediate queue again. 112 try: 113 return 0, self._get_immediate() 114 except Empty: 115 if not remaining_times: 116 # No items in any of the buckets. 117 raise 118 119 # There's items, but have to wait before we can retrieve them, 120 # return the shortest remaining time. 121 return min(remaining_times), None 122 123 def get(self, block=True, timeout=None): 124 """Retrive the task from the first available bucket. 125 126 Available as in, there is an item in the queue and you can 127 consume tokens from it. 
128 129 """ 130 time_start = time() 131 did_timeout = lambda: timeout and time() - time_start > timeout 132 133 with self.not_empty: 134 while True: 135 try: 136 remaining_time, item = self._get() 137 except Empty: 138 if not block or did_timeout(): 139 raise 140 self.not_empty.wait(timeout) 141 continue 142 if remaining_time: 143 if not block or did_timeout(): 144 raise Empty() 145 sleep(min(remaining_time, timeout or 1)) 146 else: 147 return item 148 149 def get_nowait(self): 150 return self.get(block=False) 151 152 def init_with_registry(self): 153 """Initialize with buckets for all the task types in the registry.""" 154 for task in self.task_registry.keys(): 155 self.add_bucket_for_type(task) 156 157 def refresh(self): 158 """Refresh rate limits for all task types in the registry.""" 159 for task in self.task_registry.keys(): 160 self.update_bucket_for_type(task) 161 162 def get_bucket_for_type(self, task_name): 163 """Get the bucket for a particular task type.""" 164 if task_name not in self.buckets: 165 return self.add_bucket_for_type(task_name) 166 return self.buckets[task_name] 167 168 def _get_queue_for_type(self, task_name): 169 bucket = self.buckets[task_name] 170 if isinstance(bucket, TokenBucketQueue): 171 return bucket.queue 172 return bucket 173 174 def update_bucket_for_type(self, task_name): 175 task_type = self.task_registry[task_name] 176 rate_limit = getattr(task_type, "rate_limit", None) 177 rate_limit = timeutils.rate(rate_limit) 178 task_queue = FastQueue() 179 if task_name in self.buckets: 180 task_queue = self._get_queue_for_type(task_name) 181 else: 182 task_queue = FastQueue() 183 184 if rate_limit: 185 task_queue = TokenBucketQueue(rate_limit, queue=task_queue) 186 187 self.buckets[task_name] = task_queue 188 return task_queue 189 190 def add_bucket_for_type(self, task_name): 191 """Add a bucket for a task type. 192 193 Will read the tasks rate limit and create a :class:`TokenBucketQueue` 194 if it has one. 
        If the task doesn't have a rate limit
        :class:`FastQueue` will be used instead.

        """
        if task_name not in self.buckets:
            return self.update_bucket_for_type(task_name)

    def qsize(self):
        """Get the total size of all the queues."""
        return sum(bucket.qsize() for bucket in self.buckets.values())

    def empty(self):
        """Returns :const:`True` if all of the buckets are empty."""
        return all(bucket.empty() for bucket in self.buckets.values())

    def clear(self):
        """Delete the data in all of the buckets."""
        for bucket in self.buckets.values():
            bucket.clear()

    @property
    def items(self):
        """Flattens the data in all of the buckets into a single list."""
        # for queues with contents [(1, 2), (3, 4), (5, 6), (7, 8)]
        # zips and flattens to [1, 3, 5, 7, 2, 4, 6, 8]
        return filter(None, chain_from_iterable(zip_longest(*[bucket.items
                                for bucket in self.buckets.values()])))


class FastQueue(Queue):
    """:class:`Queue.Queue` supporting the interface of
    :class:`TokenBucketQueue` (no rate limiting)."""

    def clear(self):
        return self.queue.clear()

    def expected_time(self, tokens=1):
        # Never rate limited, so an item is always immediately available.
        return 0

    def wait(self, block=True):
        return self.get(block=block)

    @property
    def items(self):
        return self.queue


class TokenBucketQueue(object):
    """Queue with rate limited get operations.

    This uses the token bucket algorithm to rate limit the queue on get
    operations.

    :param fill_rate: The rate in tokens/second that the bucket will
                      be refilled.
    :keyword capacity: Maximum number of tokens in the bucket.
                       Default is 1.

    """
    RateLimitExceeded = RateLimitExceeded

    def __init__(self, fill_rate, queue=None, capacity=1):
        self._bucket = TokenBucket(fill_rate, capacity)
        self.queue = queue
        if not self.queue:
            self.queue = Queue()

    def put(self, item, block=True):
        """Put an item onto the queue."""
        self.queue.put(item, block=block)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        :raises Queue.Full: If a free slot is not immediately available.

        """
        return self.put(item, block=False)

    def get(self, block=True):
        """Remove and return an item from the queue.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        get = block and self.queue.get or self.queue.get_nowait

        if not block and not self.items:
            raise Empty()

        # A token must be available before the item may be consumed.
        if not self._bucket.can_consume(1):
            raise RateLimitExceeded()

        return get()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        :raises RateLimitExceeded: If a token could not be consumed from the
                                   token bucket (consuming from the queue
                                   too fast).
        :raises Queue.Empty: If an item is not immediately available.

        """
        return self.get(block=False)

    def qsize(self):
        """Returns the size of the queue."""
        return self.queue.qsize()

    def empty(self):
        """Returns :const:`True` if the queue is empty."""
        return self.queue.empty()

    def clear(self):
        """Delete all data in the queue."""
        return self.items.clear()

    def wait(self, block=False):
        """Wait until a token can be retrieved from the bucket and return
        the next item."""
        get = self.get
        expected_time = self.expected_time
        while 1:
            remaining = expected_time()
            if not remaining:
                return get(block=block)
            sleep(remaining)

    def expected_time(self, tokens=1):
        """Returns the expected time in seconds of when a new token should be
        available."""
        if not self.items:
            return 0
        return self._bucket.expected_time(tokens)

    @property
    def items(self):
        """Underlying data.  Do not modify."""
        return self.queue.queue
[end of celery/worker/buckets.py]
[start of celery/worker/consumer.py]
# -*- coding: utf-8 -*-
"""
celery.worker.consumer
~~~~~~~~~~~~~~~~~~~~~~

This module contains the component responsible for consuming messages
from the broker, processing the messages and keeping the broker connections
up and running.

:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.


* :meth:`~Consumer.start` is an infinite loop, which only iterates
  again if the connection is lost. For each iteration (at start, or if the
  connection is lost) it calls :meth:`~Consumer.reset_connection`,
  and starts the consumer by calling :meth:`~Consumer.consume_messages`.

* :meth:`~Consumer.reset_connection`, clears the internal queues,
  establishes a new connection to the broker, sets up the task
  consumer (+ QoS), and the broadcast remote control command consumer.
22 23 Also if events are enabled it configures the event dispatcher and starts 24 up the heartbeat thread. 25 26 * Finally it can consume messages. :meth:`~Consumer.consume_messages` 27 is simply an infinite loop waiting for events on the AMQP channels. 28 29 Both the task consumer and the broadcast consumer uses the same 30 callback: :meth:`~Consumer.receive_message`. 31 32 * So for each message received the :meth:`~Consumer.receive_message` 33 method is called, this checks the payload of the message for either 34 a `task` key or a `control` key. 35 36 If the message is a task, it verifies the validity of the message 37 converts it to a :class:`celery.worker.job.TaskRequest`, and sends 38 it to :meth:`~Consumer.on_task`. 39 40 If the message is a control command the message is passed to 41 :meth:`~Consumer.on_control`, which in turn dispatches 42 the control command using the control dispatcher. 43 44 It also tries to handle malformed or invalid messages properly, 45 so the worker doesn't choke on them and die. Any invalid messages 46 are acknowledged immediately and logged, so the message is not resent 47 again, and again. 48 49 * If the task has an ETA/countdown, the task is moved to the `eta_schedule` 50 so the :class:`timer2.Timer` can schedule it at its 51 deadline. Tasks without an eta are moved immediately to the `ready_queue`, 52 so they can be picked up by the :class:`~celery.worker.mediator.Mediator` 53 to be sent to the pool. 54 55 * When a task with an ETA is received the QoS prefetch count is also 56 incremented, so another message can be reserved. When the ETA is met 57 the prefetch count is decremented again, though this cannot happen 58 immediately because amqplib doesn't support doing broker requests 59 across threads. Instead the current prefetch count is kept as a 60 shared counter, so as soon as :meth:`~Consumer.consume_messages` 61 detects that the value has changed it will send out the actual 62 QoS event to the broker. 
63 64 * Notice that when the connection is lost all internal queues are cleared 65 because we can no longer ack the messages reserved in memory. 66 However, this is not dangerous as the broker will resend them 67 to another worker when the channel is closed. 68 69 * **WARNING**: :meth:`~Consumer.stop` does not close the connection! 70 This is because some pre-acked messages may be in processing, 71 and they need to be finished before the channel is closed. 72 For celeryd this means the pool must finish the tasks it has acked 73 early, *then* close the connection. 74 75 """ 76 from __future__ import absolute_import 77 from __future__ import with_statement 78 79 import socket 80 import sys 81 import threading 82 import traceback 83 import warnings 84 85 from ..app import app_or_default 86 from ..datastructures import AttributeDict 87 from ..exceptions import NotRegistered 88 from ..utils import noop 89 from ..utils import timer2 90 from ..utils.encoding import safe_repr 91 from . import state 92 from .job import TaskRequest, InvalidTaskError 93 from .control.registry import Panel 94 from .heartbeat import Heart 95 96 RUN = 0x1 97 CLOSE = 0x2 98 99 #: Prefetch count can't exceed short. 100 PREFETCH_COUNT_MAX = 0xFFFF 101 102 #: Error message for when an unregistered task is received. 103 UNKNOWN_TASK_ERROR = """\ 104 Received unregistered task of type %s. 105 The message has been ignored and discarded. 106 107 Did you remember to import the module containing this task? 108 Or maybe you are using relative imports? 109 Please see http://bit.ly/gLye1c for more information. 110 111 The full contents of the message body was: 112 %s 113 """ 114 115 #: Error message for when an invalid task message is received. 116 INVALID_TASK_ERROR = """\ 117 Received invalid task message: %s 118 The message has been ignored and discarded. 

Please ensure your message conforms to the task
message protocol as described here: http://bit.ly/hYj41y

The full contents of the message body was:
%s
"""

MESSAGE_REPORT_FMT = """\
body: %s {content_type:%s content_encoding:%s delivery_info:%s}\
"""


class QoS(object):
    """Quality of Service for Channel.

    For thread-safe increment/decrement of a channels prefetch count value.

    :param consumer: A :class:`kombu.messaging.Consumer` instance.
    :param initial_value: Initial prefetch count value.
    :param logger: Logger used to log debug messages.

    """
    # Last prefetch count actually sent to the broker (see :meth:`set`).
    prev = None

    def __init__(self, consumer, initial_value, logger):
        self.consumer = consumer
        self.logger = logger
        self._mutex = threading.RLock()
        self.value = initial_value

    def increment(self, n=1):
        """Increment the current prefetch count value by n."""
        with self._mutex:
            if self.value:
                new_value = self.value + max(n, 0)
                self.value = self.set(new_value)
            return self.value

    def _sub(self, n=1):
        # NOTE(review): this assert is stripped under ``python -O`` and the
        # ``> 1`` threshold (rather than ``>= 0``) looks odd — confirm the
        # intended lower bound for the prefetch count.
        assert self.value - n > 1
        self.value -= n

    def decrement(self, n=1):
        """Decrement the current prefetch count value by n."""
        with self._mutex:
            if self.value:
                self._sub(n)
                self.set(self.value)
            return self.value

    def decrement_eventually(self, n=1):
        """Decrement the value, but do not update the qos.

        The MainThread will be responsible for calling :meth:`update`
        when necessary.

        """
        with self._mutex:
            if self.value:
                self._sub(n)

    def set(self, pcount):
        """Set channel prefetch_count setting."""
        # Only talk to the broker when the value actually changed.
        if pcount != self.prev:
            new_value = pcount
            if pcount > PREFETCH_COUNT_MAX:
                self.logger.warning("QoS: Disabled: prefetch_count exceeds %r",
                                    PREFETCH_COUNT_MAX)
                new_value = 0
            self.logger.debug("basic.qos: prefetch_count->%s", new_value)
            self.consumer.qos(prefetch_count=new_value)
            self.prev = pcount
        return pcount

    def update(self):
        """Update prefetch count with current value."""
        with self._mutex:
            return self.set(self.value)


class Consumer(object):
    """Listen for messages received from the broker and
    move them to the ready queue for task processing.

    :param ready_queue: See :attr:`ready_queue`.
    :param eta_schedule: See :attr:`eta_schedule`.

    """

    #: The queue that holds tasks ready for immediate processing.
    ready_queue = None

    #: Timer for tasks with an ETA/countdown.
    eta_schedule = None

    #: Enable/disable events.
    send_events = False

    #: Optional callback to be called when the connection is established.
    #: Will only be called once, even if the connection is lost and
    #: re-established.
    init_callback = None

    #: The current hostname.  Defaults to the system hostname.
    hostname = None

    #: Initial QoS prefetch count for the task channel.
    initial_prefetch_count = 0

    #: A :class:`celery.events.EventDispatcher` for sending events.
    event_dispatcher = None

    #: The thread that sends event heartbeats at regular intervals.
    #: The heartbeats are used by monitors to detect that a worker
    #: went offline/disappeared.
    heart = None

    #: The logger instance to use.  Defaults to the default Celery logger.
    logger = None

    #: The broker connection.
    connection = None

    #: The consumer used to consume task messages.
    task_consumer = None

    #: The consumer used to consume broadcast commands.
    broadcast_consumer = None

    #: The process mailbox (kombu pidbox node).
    pidbox_node = None
    _pidbox_node_shutdown = None    # used for greenlets
    _pidbox_node_stopped = None     # used for greenlets

    #: The current worker pool instance.
    pool = None

    #: A timer used for high-priority internal tasks, such
    #: as sending heartbeats.
    priority_timer = None

    # Consumer state, can be RUN or CLOSE.
    _state = None

    def __init__(self, ready_queue, eta_schedule, logger,
            init_callback=noop, send_events=False, hostname=None,
            initial_prefetch_count=2, pool=None, app=None,
            priority_timer=None, controller=None):
        self.app = app_or_default(app)
        self.connection = None
        self.task_consumer = None
        self.controller = controller
        self.broadcast_consumer = None
        self.ready_queue = ready_queue
        self.eta_schedule = eta_schedule
        self.send_events = send_events
        self.init_callback = init_callback
        self.logger = logger
        self.hostname = hostname or socket.gethostname()
        self.initial_prefetch_count = initial_prefetch_count
        self.event_dispatcher = None
        self.heart = None
        self.pool = pool
        self.priority_timer = priority_timer or timer2.default_timer
        pidbox_state = AttributeDict(app=self.app,
                                     logger=logger,
                                     hostname=self.hostname,
                                     listener=self,     # pre 2.2
                                     consumer=self)
        self.pidbox_node = self.app.control.mailbox.Node(self.hostname,
                                                         state=pidbox_state,
                                                         handlers=Panel.data)
        # Cache the recoverable error classes for this transport up front.
        conninfo = self.app.broker_connection()
        self.connection_errors = conninfo.connection_errors
        self.channel_errors = conninfo.channel_errors

    def start(self):
        """Start the consumer.

        Automatically survives intermittent connection failure,
        and will retry establishing the connection and restart
        consuming messages.

        """

        self.init_callback(self)

        while self._state != CLOSE:
            try:
                self.reset_connection()
                self.consume_messages()
            except self.connection_errors:
                self.logger.error("Consumer: Connection to broker lost."
                                + " Trying to re-establish the connection...",
                                exc_info=sys.exc_info())

    def consume_messages(self):
        """Consume messages forever (or until an exception is raised)."""
        self._debug("Starting message consumer...")
        self.task_consumer.consume()
        self._debug("Ready to accept tasks!")

        while self._state != CLOSE and self.connection:
            # Other threads may change the prefetch count via
            # decrement_eventually; apply it from the main thread here.
            if self.qos.prev != self.qos.value:
                self.qos.update()
            try:
                self.connection.drain_events(timeout=1)
            except socket.timeout:
                pass
            except socket.error:
                if self._state != CLOSE:
                    raise

    def on_task(self, task):
        """Handle received task.

        If the task has an `eta` we enter it into the ETA schedule,
        otherwise we move it the ready queue for immediate processing.

        """

        if task.revoked():
            return

        self.logger.info("Got task from broker: %s", task.shortinfo())

        if self.event_dispatcher.enabled:
            self.event_dispatcher.send("task-received", uuid=task.task_id,
                    name=task.task_name, args=safe_repr(task.args),
                    kwargs=safe_repr(task.kwargs), retries=task.retries,
                    eta=task.eta and task.eta.isoformat(),
                    expires=task.expires and task.expires.isoformat())

        if task.eta:
            try:
                eta = timer2.to_timestamp(task.eta)
            except OverflowError, exc:
                self.logger.error(
                    "Couldn't convert eta %s to timestamp: %r. Task: %r",
                    task.eta, exc, task.info(safe=True),
                    exc_info=sys.exc_info())
                task.acknowledge()
            else:
                # Reserve an extra slot while the task waits for its ETA.
                self.qos.increment()
                self.eta_schedule.apply_at(eta,
                                           self.apply_eta_task, (task, ))
        else:
            state.task_reserved(task)
            self.ready_queue.put(task)

    def on_control(self, body, message):
        """Process remote control command message."""
        try:
            self.pidbox_node.handle_message(body, message)
        except KeyError, exc:
            self.logger.error("No such control command: %s", exc)
        except Exception, exc:
            self.logger.error(
                "Error occurred while handling control command: %r\n%r",
                    exc, traceback.format_exc(), exc_info=sys.exc_info())
            self.reset_pidbox_node()

    def apply_eta_task(self, task):
        """Method called by the timer to apply a task with an
        ETA/countdown."""
        state.task_reserved(task)
        self.ready_queue.put(task)
        # Release the extra prefetch slot reserved in on_task.
        self.qos.decrement_eventually()

    def _message_report(self, body, message):
        # One-line diagnostic description of a raw message, for logging.
        return MESSAGE_REPORT_FMT % (safe_repr(body),
                                     safe_repr(message.content_type),
                                     safe_repr(message.content_encoding),
                                     safe_repr(message.delivery_info))

    def receive_message(self, body, message):
        """Handles incoming messages.

        :param body: The message body.
        :param message: The kombu message object.

        """
        # need to guard against errors occurring while acking the message.
        def ack():
            try:
                message.ack()
            except self.connection_errors + (AttributeError, ), exc:
                self.logger.critical(
                    "Couldn't ack %r: %s reason:%r",
                        message.delivery_tag,
                        self._message_report(body, message), exc)

        try:
            body["task"]
        except (KeyError, TypeError):
            warnings.warn(RuntimeWarning(
                "Received and deleted unknown message. Wrong destination?!? \
the full contents of the message body was: %s" % (
                    self._message_report(body, message), )))
            ack()
            return

        try:
            task = TaskRequest.from_message(message, body, ack,
                                            app=self.app,
                                            logger=self.logger,
                                            hostname=self.hostname,
                                            eventer=self.event_dispatcher)

        except NotRegistered, exc:
            self.logger.error(UNKNOWN_TASK_ERROR, exc, safe_repr(body),
                              exc_info=sys.exc_info())
            ack()
        except InvalidTaskError, exc:
            self.logger.error(INVALID_TASK_ERROR, str(exc), safe_repr(body),
                              exc_info=sys.exc_info())
            ack()
        else:
            self.on_task(task)

    def maybe_conn_error(self, fun):
        """Applies function but ignores any connection or channel
        errors raised."""
        try:
            fun()
        except (AttributeError, ) + \
                self.connection_errors + \
                self.channel_errors:
            pass

    def close_connection(self):
        """Closes the current broker connection and all open channels."""

        # We must set self.connection to None here, so
        # that the green pidbox thread exits.
        connection, self.connection = self.connection, None

        if self.task_consumer:
            self._debug("Closing consumer channel...")
            # maybe_conn_error returns None, so this also resets
            # self.task_consumer to None.
            self.task_consumer = \
                    self.maybe_conn_error(self.task_consumer.close)

        self.stop_pidbox_node()

        if connection:
            self._debug("Closing broker connection...")
            self.maybe_conn_error(connection.close)

    def stop_consumers(self, close_connection=True):
        """Stop consuming tasks and broadcast commands, also stops
        the heartbeat thread and event dispatcher.

        :keyword close_connection: Set to False to skip closing the broker
                                   connection.

        """
        if not self._state == RUN:
            return

        if self.heart:
            # Stop the heartbeat thread if it's running.
481 self.logger.debug("Heart: Going into cardiac arrest...") 482 self.heart = self.heart.stop() 483 484 self._debug("Cancelling task consumer...") 485 if self.task_consumer: 486 self.maybe_conn_error(self.task_consumer.cancel) 487 488 if self.event_dispatcher: 489 self._debug("Shutting down event dispatcher...") 490 self.event_dispatcher = \ 491 self.maybe_conn_error(self.event_dispatcher.close) 492 493 self._debug("Cancelling broadcast consumer...") 494 if self.broadcast_consumer: 495 self.maybe_conn_error(self.broadcast_consumer.cancel) 496 497 if close_connection: 498 self.close_connection() 499 500 def on_decode_error(self, message, exc): 501 """Callback called if an error occurs while decoding 502 a message received. 503 504 Simply logs the error and acknowledges the message so it 505 doesn't enter a loop. 506 507 :param message: The message with errors. 508 :param exc: The original exception instance. 509 510 """ 511 self.logger.critical( 512 "Can't decode message body: %r (type:%r encoding:%r raw:%r')", 513 exc, message.content_type, message.content_encoding, 514 safe_repr(message.body)) 515 message.ack() 516 517 def reset_pidbox_node(self): 518 """Sets up the process mailbox.""" 519 self.stop_pidbox_node() 520 # close previously opened channel if any. 
521 if self.pidbox_node.channel: 522 try: 523 self.pidbox_node.channel.close() 524 except self.connection_errors + self.channel_errors: 525 pass 526 527 if self.pool is not None and self.pool.is_green: 528 return self.pool.spawn_n(self._green_pidbox_node) 529 self.pidbox_node.channel = self.connection.channel() 530 self.broadcast_consumer = self.pidbox_node.listen( 531 callback=self.on_control) 532 self.broadcast_consumer.consume() 533 534 def stop_pidbox_node(self): 535 if self._pidbox_node_stopped: 536 self._pidbox_node_shutdown.set() 537 self._debug("Waiting for broadcast thread to shutdown...") 538 self._pidbox_node_stopped.wait() 539 self._pidbox_node_stopped = self._pidbox_node_shutdown = None 540 elif self.broadcast_consumer: 541 self._debug("Closing broadcast channel...") 542 self.broadcast_consumer = \ 543 self.maybe_conn_error(self.broadcast_consumer.channel.close) 544 545 def _green_pidbox_node(self): 546 """Sets up the process mailbox when running in a greenlet 547 environment.""" 548 # THIS CODE IS TERRIBLE 549 # Luckily work has already started rewriting the Consumer for 3.0. 550 self._pidbox_node_shutdown = threading.Event() 551 self._pidbox_node_stopped = threading.Event() 552 try: 553 with self._open_connection() as conn: 554 self.pidbox_node.channel = conn.default_channel 555 self.broadcast_consumer = self.pidbox_node.listen( 556 callback=self.on_control) 557 with self.broadcast_consumer: 558 while not self._pidbox_node_shutdown.isSet(): 559 try: 560 conn.drain_events(timeout=1.0) 561 except socket.timeout: 562 pass 563 finally: 564 self._pidbox_node_stopped.set() 565 566 def reset_connection(self): 567 """Re-establish the broker connection and set up consumers, 568 heartbeat and the event dispatcher.""" 569 self._debug("Re-establishing connection to the broker...") 570 self.stop_consumers() 571 572 # Clear internal queues to get rid of old messages. 573 # They can't be acked anyway, as a delivery tag is specific 574 # to the current channel. 
575 self.ready_queue.clear() 576 self.eta_schedule.clear() 577 578 # Re-establish the broker connection and setup the task consumer. 579 self.connection = self._open_connection() 580 self._debug("Connection established.") 581 self.task_consumer = self.app.amqp.get_task_consumer(self.connection, 582 on_decode_error=self.on_decode_error) 583 # QoS: Reset prefetch window. 584 self.qos = QoS(self.task_consumer, 585 self.initial_prefetch_count, self.logger) 586 self.qos.update() 587 588 # receive_message handles incoming messages. 589 self.task_consumer.register_callback(self.receive_message) 590 591 # Setup the process mailbox. 592 self.reset_pidbox_node() 593 594 # Flush events sent while connection was down. 595 prev_event_dispatcher = self.event_dispatcher 596 self.event_dispatcher = self.app.events.Dispatcher(self.connection, 597 hostname=self.hostname, 598 enabled=self.send_events) 599 if prev_event_dispatcher: 600 self.event_dispatcher.copy_buffer(prev_event_dispatcher) 601 self.event_dispatcher.flush() 602 603 # Restart heartbeat thread. 604 self.restart_heartbeat() 605 606 # We're back! 607 self._state = RUN 608 609 def restart_heartbeat(self): 610 """Restart the heartbeat thread. 611 612 This thread sends heartbeat events at intervals so monitors 613 can tell if the worker is off-line/missing. 614 615 """ 616 self.heart = Heart(self.priority_timer, self.event_dispatcher) 617 self.heart.start() 618 619 def _open_connection(self): 620 """Establish the broker connection. 621 622 Will retry establishing the connection if the 623 :setting:`BROKER_CONNECTION_RETRY` setting is enabled 624 625 """ 626 627 # Callback called for each retry while the connection 628 # can't be established. 629 def _error_handler(exc, interval): 630 self.logger.error("Consumer: Connection Error: %s. " 631 "Trying again in %d seconds...", exc, interval) 632 633 # remember that the connection is lazy, it won't establish 634 # until it's needed. 
635 conn = self.app.broker_connection() 636 if not self.app.conf.BROKER_CONNECTION_RETRY: 637 # retry disabled, just call connect directly. 638 conn.connect() 639 return conn 640 641 return conn.ensure_connection(_error_handler, 642 self.app.conf.BROKER_CONNECTION_MAX_RETRIES) 643 644 def stop(self): 645 """Stop consuming. 646 647 Does not close the broker connection, so be sure to call 648 :meth:`close_connection` when you are finished with it. 649 650 """ 651 # Notifies other threads that this instance can't be used 652 # anymore. 653 self._state = CLOSE 654 self._debug("Stopping consumers...") 655 self.stop_consumers(close_connection=False) 656 657 @property 658 def info(self): 659 """Returns information about this consumer instance 660 as a dict. 661 662 This is also the consumer related info returned by 663 ``celeryctl stats``. 664 665 """ 666 conninfo = {} 667 if self.connection: 668 conninfo = self.connection.info() 669 conninfo.pop("password", None) # don't send password. 670 return {"broker": conninfo, "prefetch_count": self.qos.value} 671 672 def _debug(self, msg, **kwargs): 673 self.logger.debug("Consumer: %s", msg, **kwargs) 674 [end of celery/worker/consumer.py] [start of celery/worker/control/__init__.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.control 4 ~~~~~~~~~~~~~~~~~~~~~ 5 6 Remote control commands. 7 See :mod:`celery.worker.control.builtins`. 8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 11 12 """ 13 from __future__ import absolute_import 14 15 from . import registry 16 17 # Loads the built-in remote control commands 18 from . import builtins # noqa 19 20 Panel = registry.Panel 21 [end of celery/worker/control/__init__.py] [start of celery/worker/control/builtins.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.control.builtins 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 6 THis module contains the built-in remote control commands. 7 8 :copyright: (c) 2009 - 2011 by Ask Solem. 
:license: BSD, see LICENSE for more details.

"""
from __future__ import absolute_import

import sys

from datetime import datetime

from ...platforms import signals as _signals
from ...registry import tasks
from ...utils import timeutils
from ...utils.encoding import safe_repr
from .. import state
from ..state import revoked

from .registry import Panel

#: Task attributes included in the output of :func:`dump_tasks`.
TASK_INFO_FIELDS = ("exchange", "routing_key", "rate_limit")


@Panel.register
def revoke(panel, task_id, terminate=False, signal=None, **kwargs):
    """Revoke task by task id.

    :param task_id: Id of the task to revoke.
    :keyword terminate: If true, also terminate the task if it is
        currently executing, by sending `signal` (default ``TERM``)
        to the worker process running it.

    """
    revoked.add(task_id)
    action = "revoked"
    if terminate:
        signum = _signals.signum(signal or "TERM")
        # Only a currently executing task can be terminated; look for a
        # matching request among the active ones.
        for request in state.active_requests:
            if request.task_id == task_id:
                action = "terminated (%s)" % (signum, )
                request.terminate(panel.consumer.pool, signal=signum)
                break

    panel.logger.info("Task %s %s.", task_id, action)
    return {"ok": "task %s %s" % (task_id, action)}


@Panel.register
def enable_events(panel):
    """Tell the worker to start sending task/worker events."""
    dispatcher = panel.consumer.event_dispatcher
    if not dispatcher.enabled:
        dispatcher.enable()
        # Announce that this worker's event stream is now live.
        dispatcher.send("worker-online")
        panel.logger.info("Events enabled by remote.")
        return {"ok": "events enabled"}
    return {"ok": "events already enabled"}


@Panel.register
def disable_events(panel):
    """Tell the worker to stop sending task/worker events."""
    dispatcher = panel.consumer.event_dispatcher
    if dispatcher.enabled:
        # Send the offline event *before* disabling the dispatcher,
        # otherwise it would be silently dropped.
        dispatcher.send("worker-offline")
        dispatcher.disable()
        panel.logger.info("Events disabled by remote.")
        return {"ok": "events disabled"}
    return {"ok": "events already disabled"}


@Panel.register
def heartbeat(panel):
    """Send a single worker heartbeat event on request."""
    panel.logger.debug("Heartbeat requested by remote.")
    dispatcher = panel.consumer.event_dispatcher
    dispatcher.send("worker-heartbeat", **state.SOFTWARE_INFO)


@Panel.register
def rate_limit(panel, task_name, rate_limit, **kwargs):
    """Set new rate limit for a task type.
79 80 See :attr:`celery.task.base.Task.rate_limit`. 81 82 :param task_name: Type of task. 83 :param rate_limit: New rate limit. 84 85 """ 86 87 try: 88 timeutils.rate(rate_limit) 89 except ValueError, exc: 90 return {"error": "Invalid rate limit string: %s" % exc} 91 92 try: 93 tasks[task_name].rate_limit = rate_limit 94 except KeyError: 95 panel.logger.error("Rate limit attempt for unknown task %s", 96 task_name, exc_info=sys.exc_info()) 97 return {"error": "unknown task"} 98 99 if not hasattr(panel.consumer.ready_queue, "refresh"): 100 panel.logger.error("Rate limit attempt, but rate limits disabled.") 101 return {"error": "rate limits disabled"} 102 103 panel.consumer.ready_queue.refresh() 104 105 if not rate_limit: 106 panel.logger.info("Disabled rate limits for tasks of type %s", 107 task_name) 108 return {"ok": "rate limit disabled successfully"} 109 110 panel.logger.info("New rate limit for tasks of type %s: %s.", 111 task_name, rate_limit) 112 return {"ok": "new rate limit set successfully"} 113 114 115 @Panel.register 116 def time_limit(panel, task_name=None, hard=None, soft=None, **kwargs): 117 try: 118 task = tasks[task_name] 119 except KeyError: 120 panel.logger.error("Change time limit attempt for unknown task %s", 121 task_name, exc_info=True) 122 return {"error": "unknown task"} 123 124 task.soft_time_limit = soft 125 task.time_limit = hard 126 127 panel.logger.info("New time limits for tasks of type %s: soft=%s hard=%s", 128 task_name, soft, hard) 129 return {"ok": "time limits set successfully"} 130 131 132 @Panel.register 133 def dump_schedule(panel, safe=False, **kwargs): 134 schedule = panel.consumer.eta_schedule.schedule 135 if not schedule.queue: 136 panel.logger.info("--Empty schedule--") 137 return [] 138 139 formatitem = lambda (i, item): "%s. 
%s pri%s %r" % (i, 140 datetime.fromtimestamp(item["eta"]), 141 item["priority"], 142 item["item"]) 143 info = map(formatitem, enumerate(schedule.info())) 144 panel.logger.debug("* Dump of current schedule:\n%s", "\n".join(info)) 145 scheduled_tasks = [] 146 for item in schedule.info(): 147 scheduled_tasks.append({"eta": item["eta"], 148 "priority": item["priority"], 149 "request": 150 item["item"].args[0].info(safe=safe)}) 151 return scheduled_tasks 152 153 154 @Panel.register 155 def dump_reserved(panel, safe=False, **kwargs): 156 ready_queue = panel.consumer.ready_queue 157 reserved = ready_queue.items 158 if not reserved: 159 panel.logger.info("--Empty queue--") 160 return [] 161 panel.logger.debug("* Dump of currently reserved tasks:\n%s", 162 "\n".join(map(safe_repr, reserved))) 163 return [request.info(safe=safe) 164 for request in reserved] 165 166 167 @Panel.register 168 def dump_active(panel, safe=False, **kwargs): 169 return [request.info(safe=safe) 170 for request in state.active_requests] 171 172 173 @Panel.register 174 def stats(panel, **kwargs): 175 asinfo = {} 176 if panel.consumer.controller.autoscaler: 177 asinfo = panel.consumer.controller.autoscaler.info() 178 return {"total": state.total_count, 179 "consumer": panel.consumer.info, 180 "pool": panel.consumer.pool.info, 181 "autoscaler": asinfo} 182 183 184 @Panel.register 185 def dump_revoked(panel, **kwargs): 186 return list(state.revoked) 187 188 189 @Panel.register 190 def dump_tasks(panel, **kwargs): 191 192 def _extract_info(task): 193 fields = dict((field, str(getattr(task, field, None))) 194 for field in TASK_INFO_FIELDS 195 if getattr(task, field, None) is not None) 196 info = map("=".join, fields.items()) 197 if not info: 198 return task.name 199 return "%s [%s]" % (task.name, " ".join(info)) 200 201 info = map(_extract_info, (tasks[task] 202 for task in sorted(tasks.keys()))) 203 panel.logger.debug("* Dump of currently registered tasks:\n%s", 204 "\n".join(info)) 205 206 return info 
207 208 209 @Panel.register 210 def ping(panel, **kwargs): 211 return "pong" 212 213 214 @Panel.register 215 def pool_grow(panel, n=1, **kwargs): 216 if panel.consumer.controller.autoscaler: 217 panel.consumer.controller.autoscaler.force_scale_up(n) 218 else: 219 panel.consumer.pool.grow(n) 220 return {"ok": "spawned worker processes"} 221 222 223 @Panel.register 224 def pool_shrink(panel, n=1, **kwargs): 225 if panel.consumer.controller.autoscaler: 226 panel.consumer.controller.autoscaler.force_scale_down(n) 227 else: 228 panel.consumer.pool.shrink(n) 229 return {"ok": "terminated worker processes"} 230 231 232 @Panel.register 233 def autoscale(panel, max=None, min=None): 234 autoscaler = panel.consumer.controller.autoscaler 235 if autoscaler: 236 max_, min_ = autoscaler.update(max, min) 237 return {"ok": "autoscale now min=%r max=%r" % (max_, min_)} 238 raise ValueError("Autoscale not enabled") 239 240 241 @Panel.register 242 def shutdown(panel, **kwargs): 243 panel.logger.warning("Got shutdown from remote.") 244 raise SystemExit("Got shutdown from remote") 245 246 247 @Panel.register 248 def add_consumer(panel, queue=None, exchange=None, exchange_type="direct", 249 routing_key=None, **options): 250 cset = panel.consumer.task_consumer 251 if not cset.consuming_from(queue): 252 declaration = dict(queue=queue, 253 exchange=exchange, 254 exchange_type=exchange_type, 255 routing_key=routing_key, 256 **options) 257 cset.add_consumer_from_dict(**declaration) 258 cset.consume() 259 panel.logger.info("Started consuming from %r", declaration) 260 return {"ok": "started consuming from %s" % (queue, )} 261 else: 262 return {"ok": "already consuming from %s" % (queue, )} 263 264 265 @Panel.register 266 def cancel_consumer(panel, queue=None, **_): 267 cset = panel.consumer.task_consumer 268 cset.cancel_by_queue(queue) 269 return {"ok": "no longer consuming from %s" % (queue, )} 270 271 272 @Panel.register 273 def active_queues(panel): 274 """Returns the queues associated with 
each worker.""" 275 return [dict(queue.as_dict(recurse=True)) 276 for queue in panel.consumer.task_consumer.queues] 277 [end of celery/worker/control/builtins.py] [start of celery/worker/control/registry.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.control.registry 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 6 The registry keeps track of available remote control commands, 7 and can be used to register new commands. 8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 11 12 """ 13 from __future__ import absolute_import 14 15 from ...utils.compat import UserDict 16 17 18 class Panel(UserDict): 19 data = dict() # Global registry. 20 21 @classmethod 22 def register(cls, method, name=None): 23 cls.data[name or method.__name__] = method 24 return method 25 [end of celery/worker/control/registry.py] [start of celery/worker/heartbeat.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.heartbeat 4 ~~~~~~~~~~~~~~~~~~~~~~~ 5 6 This is the internal thread that sends heartbeat events 7 at regular intervals. 8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 11 12 """ 13 from __future__ import absolute_import 14 15 from .state import SOFTWARE_INFO 16 17 18 class Heart(object): 19 """Timer sending heartbeats at regular intervals. 20 21 :param timer: Timer instance. 22 :param eventer: Event dispatcher used to send the event. 23 :keyword interval: Time in seconds between heartbeats. 24 Default is 30 seconds. 
25 26 """ 27 28 def __init__(self, timer, eventer, interval=None): 29 self.timer = timer 30 self.eventer = eventer 31 self.interval = interval or 30 32 self.tref = None 33 34 def _send(self, event): 35 return self.eventer.send(event, **SOFTWARE_INFO) 36 37 def start(self): 38 self._send("worker-online") 39 self.tref = self.timer.apply_interval(self.interval * 1000.0, 40 self._send, ("worker-heartbeat", )) 41 42 def stop(self): 43 if self.tref is not None: 44 self.timer.cancel(self.tref) 45 self.tref = None 46 self._send("worker-offline") 47 [end of celery/worker/heartbeat.py] [start of celery/worker/job.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.job 4 ~~~~~~~~~~~~~~~~~ 5 6 This module defines the :class:`TaskRequest` class, 7 which specifies how tasks are executed. 8 9 :copyright: (c) 2009 - 2011 by Ask Solem. 10 :license: BSD, see LICENSE for more details. 11 12 """ 13 from __future__ import absolute_import 14 15 import os 16 import sys 17 import time 18 import socket 19 import warnings 20 21 from datetime import datetime 22 23 from .. import current_app 24 from .. import exceptions 25 from .. import platforms 26 from .. import registry 27 from ..app import app_or_default 28 from ..datastructures import ExceptionInfo 29 from ..execute.trace import TaskTrace 30 from ..utils import noop, kwdict, fun_takes_kwargs, truncate_text 31 from ..utils.encoding import safe_repr, safe_str, default_encoding 32 from ..utils.serialization import get_pickleable_exception 33 from ..utils.timeutils import maybe_iso8601 34 35 from . import state 36 37 #: Keys to keep from the message delivery info. The values 38 #: of these keys must be pickleable. 
39 WANTED_DELIVERY_INFO = ("exchange", "routing_key", "consumer_tag", ) 40 41 42 class InvalidTaskError(Exception): 43 """The task has invalid data or is not properly constructed.""" 44 pass 45 46 47 if sys.version_info >= (3, 0): 48 49 def default_encode(obj): 50 return obj 51 else: 52 53 def default_encode(obj): # noqa 54 return unicode(obj, default_encoding()) 55 56 57 class WorkerTaskTrace(TaskTrace): 58 """Wraps the task in a jail, catches all exceptions, and 59 saves the status and result of the task execution to the task 60 meta backend. 61 62 If the call was successful, it saves the result to the task result 63 backend, and sets the task status to `"SUCCESS"`. 64 65 If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts 66 the original exception, uses that as the result and sets the task status 67 to `"RETRY"`. 68 69 If the call results in an exception, it saves the exception as the task 70 result, and sets the task status to `"FAILURE"`. 71 72 :param task_name: The name of the task to execute. 73 :param task_id: The unique id of the task. 74 :param args: List of positional args to pass on to the function. 75 :param kwargs: Keyword arguments mapping to pass on to the function. 76 77 :keyword loader: Custom loader to use, if not specified the current app 78 loader will be used. 79 :keyword hostname: Custom hostname to use, if not specified the system 80 hostname will be used. 81 82 :returns: the evaluated functions return value on success, or 83 the exception instance on failure. 84 85 """ 86 87 #: Current loader. 88 loader = None 89 90 #: Hostname to report as. 
91 hostname = None 92 93 def __init__(self, *args, **kwargs): 94 self.loader = kwargs.get("loader") or current_app.loader 95 self.hostname = kwargs.get("hostname") or socket.gethostname() 96 super(WorkerTaskTrace, self).__init__(*args, **kwargs) 97 98 self._store_errors = True 99 if self.task.ignore_result: 100 self._store_errors = self.task.store_errors_even_if_ignored 101 self.super = super(WorkerTaskTrace, self) 102 103 def execute_safe(self, *args, **kwargs): 104 """Same as :meth:`execute`, but catches errors.""" 105 try: 106 return self.execute(*args, **kwargs) 107 except Exception, exc: 108 _type, _value, _tb = sys.exc_info() 109 _value = self.task.backend.prepare_exception(exc) 110 exc_info = ExceptionInfo((_type, _value, _tb)) 111 warnings.warn("Exception outside body: %s: %s\n%s" % tuple( 112 map(str, (exc.__class__, exc, exc_info.traceback)))) 113 return exc_info 114 115 def execute(self): 116 """Execute, trace and store the result of the task.""" 117 self.loader.on_task_init(self.task_id, self.task) 118 if self.task.track_started: 119 if not self.task.ignore_result: 120 self.task.backend.mark_as_started(self.task_id, 121 pid=os.getpid(), 122 hostname=self.hostname) 123 try: 124 return super(WorkerTaskTrace, self).execute() 125 finally: 126 try: 127 self.task.backend.process_cleanup() 128 self.loader.on_process_cleanup() 129 except (KeyboardInterrupt, SystemExit, MemoryError): 130 raise 131 except Exception, exc: 132 logger = current_app.log.get_default_logger() 133 logger.error("Process cleanup failed: %r", exc, 134 exc_info=sys.exc_info()) 135 136 def handle_success(self, retval, *args): 137 """Handle successful execution.""" 138 if not self.task.ignore_result: 139 self.task.backend.mark_as_done(self.task_id, retval) 140 return self.super.handle_success(retval, *args) 141 142 def handle_retry(self, exc, type_, tb, strtb): 143 """Handle retry exception.""" 144 message, orig_exc = exc.args 145 if self._store_errors: 146 
self.task.backend.mark_as_retry(self.task_id, orig_exc, strtb) 147 return self.super.handle_retry(exc, type_, tb, strtb) 148 149 def handle_failure(self, exc, type_, tb, strtb): 150 """Handle exception.""" 151 if self._store_errors: 152 self.task.backend.mark_as_failure(self.task_id, exc, strtb) 153 exc = get_pickleable_exception(exc) 154 return self.super.handle_failure(exc, type_, tb, strtb) 155 156 157 def execute_and_trace(task_name, *args, **kwargs): 158 """This is a pickleable method used as a target when applying to pools. 159 160 It's the same as:: 161 162 >>> WorkerTaskTrace(task_name, *args, **kwargs).execute_safe() 163 164 """ 165 hostname = kwargs.get("hostname") 166 platforms.set_mp_process_title("celeryd", task_name, hostname=hostname) 167 try: 168 return WorkerTaskTrace(task_name, *args, **kwargs).execute_safe() 169 finally: 170 platforms.set_mp_process_title("celeryd", "-idle-", hostname) 171 172 173 class TaskRequest(object): 174 """A request for task execution.""" 175 176 #: Kind of task. Must be a name registered in the task registry. 177 name = None 178 179 #: The task class (set by constructor using :attr:`task_name`). 180 task = None 181 182 #: UUID of the task. 183 task_id = None 184 185 #: UUID of the taskset that this task belongs to. 186 taskset_id = None 187 188 #: List of positional arguments to apply to the task. 189 args = None 190 191 #: Mapping of keyword arguments to apply to the task. 192 kwargs = None 193 194 #: Number of times the task has been retried. 195 retries = 0 196 197 #: The tasks eta (for information only). 198 eta = None 199 200 #: When the task expires. 201 expires = None 202 203 #: Body of a chord depending on this task. 204 chord = None 205 206 #: Callback called when the task should be acknowledged. 207 on_ack = None 208 209 #: The message object. Used to acknowledge the message. 210 message = None 211 212 #: Additional delivery info, e.g. contains the path from 213 #: Producer to consumer. 
214 delivery_info = None 215 216 #: Flag set when the task has been acknowledged. 217 acknowledged = False 218 219 #: Format string used to log task success. 220 success_msg = """\ 221 Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s 222 """ 223 224 #: Format string used to log task failure. 225 error_msg = """\ 226 Task %(name)s[%(id)s] raised exception: %(exc)s\n%(traceback)s 227 """ 228 229 #: Format string used to log task retry. 230 retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s""" 231 232 #: Timestamp set when the task is started. 233 time_start = None 234 235 #: Process id of the worker processing this task (if any). 236 worker_pid = None 237 238 _already_revoked = False 239 _terminate_on_ack = None 240 241 def __init__(self, task_name, task_id, args, kwargs, 242 on_ack=noop, retries=0, delivery_info=None, hostname=None, 243 logger=None, eventer=None, eta=None, expires=None, app=None, 244 taskset_id=None, chord=None, **opts): 245 self.app = app_or_default(app) 246 self.task_name = task_name 247 self.task_id = task_id 248 self.taskset_id = taskset_id 249 self.retries = retries 250 self.args = args 251 self.kwargs = kwargs 252 self.eta = eta 253 self.expires = expires 254 self.chord = chord 255 self.on_ack = on_ack 256 self.delivery_info = {} if delivery_info is None else delivery_info 257 self.hostname = hostname or socket.gethostname() 258 self.logger = logger or self.app.log.get_default_logger() 259 self.eventer = eventer 260 261 self.task = registry.tasks[self.task_name] 262 self._store_errors = True 263 if self.task.ignore_result: 264 self._store_errors = self.task.store_errors_even_if_ignored 265 266 @classmethod 267 def from_message(cls, message, body, on_ack=noop, **kw): 268 """Create request from a task message. 269 270 :raises UnknownTaskError: if the message does not describe a task, 271 the message is also rejected. 
272 273 """ 274 delivery_info = getattr(message, "delivery_info", {}) 275 delivery_info = dict((key, delivery_info.get(key)) 276 for key in WANTED_DELIVERY_INFO) 277 278 kwargs = body.get("kwargs", {}) 279 if not hasattr(kwargs, "items"): 280 raise InvalidTaskError("Task keyword arguments is not a mapping.") 281 try: 282 task_name = body["task"] 283 task_id = body["id"] 284 except KeyError, exc: 285 raise InvalidTaskError( 286 "Task message is missing required field %r" % (exc, )) 287 288 return cls(task_name=task_name, 289 task_id=task_id, 290 taskset_id=body.get("taskset", None), 291 args=body.get("args", []), 292 kwargs=kwdict(kwargs), 293 chord=body.get("chord"), 294 retries=body.get("retries", 0), 295 eta=maybe_iso8601(body.get("eta")), 296 expires=maybe_iso8601(body.get("expires")), 297 on_ack=on_ack, delivery_info=delivery_info, **kw) 298 299 def get_instance_attrs(self, loglevel, logfile): 300 return {"logfile": logfile, "loglevel": loglevel, 301 "hostname": self.hostname, 302 "id": self.task_id, "taskset": self.taskset_id, 303 "retries": self.retries, "is_eager": False, 304 "delivery_info": self.delivery_info, "chord": self.chord} 305 306 def extend_with_default_kwargs(self, loglevel, logfile): 307 """Extend the tasks keyword arguments with standard task arguments. 308 309 Currently these are `logfile`, `loglevel`, `task_id`, 310 `task_name`, `task_retries`, and `delivery_info`. 311 312 See :meth:`celery.task.base.Task.run` for more information. 313 314 Magic keyword arguments are deprecated and will be removed 315 in version 3.0. 
316 317 """ 318 if not self.task.accept_magic_kwargs: 319 return self.kwargs 320 kwargs = dict(self.kwargs) 321 default_kwargs = {"logfile": logfile, 322 "loglevel": loglevel, 323 "task_id": self.task_id, 324 "task_name": self.task_name, 325 "task_retries": self.retries, 326 "task_is_eager": False, 327 "delivery_info": self.delivery_info} 328 fun = self.task.run 329 supported_keys = fun_takes_kwargs(fun, default_kwargs) 330 extend_with = dict((key, val) for key, val in default_kwargs.items() 331 if key in supported_keys) 332 kwargs.update(extend_with) 333 return kwargs 334 335 def execute_using_pool(self, pool, loglevel=None, logfile=None): 336 """Like :meth:`execute`, but using the :mod:`multiprocessing` pool. 337 338 :param pool: A :class:`multiprocessing.Pool` instance. 339 340 :keyword loglevel: The loglevel used by the task. 341 342 :keyword logfile: The logfile used by the task. 343 344 """ 345 if self.revoked(): 346 return 347 348 args = self._get_tracer_args(loglevel, logfile) 349 instance_attrs = self.get_instance_attrs(loglevel, logfile) 350 result = pool.apply_async(execute_and_trace, 351 args=args, 352 kwargs={"hostname": self.hostname, 353 "request": instance_attrs}, 354 accept_callback=self.on_accepted, 355 timeout_callback=self.on_timeout, 356 callback=self.on_success, 357 errback=self.on_failure, 358 soft_timeout=self.task.soft_time_limit, 359 timeout=self.task.time_limit) 360 return result 361 362 def execute(self, loglevel=None, logfile=None): 363 """Execute the task in a :class:`WorkerTaskTrace`. 364 365 :keyword loglevel: The loglevel used by the task. 366 367 :keyword logfile: The logfile used by the task. 368 369 """ 370 if self.revoked(): 371 return 372 373 # acknowledge task as being processed. 
374 if not self.task.acks_late: 375 self.acknowledge() 376 377 instance_attrs = self.get_instance_attrs(loglevel, logfile) 378 tracer = WorkerTaskTrace(*self._get_tracer_args(loglevel, logfile), 379 **{"hostname": self.hostname, 380 "loader": self.app.loader, 381 "request": instance_attrs}) 382 retval = tracer.execute() 383 self.acknowledge() 384 return retval 385 386 def maybe_expire(self): 387 """If expired, mark the task as revoked.""" 388 if self.expires and datetime.now() > self.expires: 389 state.revoked.add(self.task_id) 390 if self._store_errors: 391 self.task.backend.mark_as_revoked(self.task_id) 392 393 def terminate(self, pool, signal=None): 394 if self.time_start: 395 return pool.terminate_job(self.worker_pid, signal) 396 else: 397 self._terminate_on_ack = (True, pool, signal) 398 399 def revoked(self): 400 """If revoked, skip task and mark state.""" 401 if self._already_revoked: 402 return True 403 if self.expires: 404 self.maybe_expire() 405 if self.task_id in state.revoked: 406 self.logger.warn("Skipping revoked task: %s[%s]", 407 self.task_name, self.task_id) 408 self.send_event("task-revoked", uuid=self.task_id) 409 self.acknowledge() 410 self._already_revoked = True 411 return True 412 return False 413 414 def send_event(self, type, **fields): 415 if self.eventer: 416 self.eventer.send(type, **fields) 417 418 def on_accepted(self, pid, time_accepted): 419 """Handler called when task is accepted by worker pool.""" 420 self.worker_pid = pid 421 self.time_start = time_accepted 422 state.task_accepted(self) 423 if not self.task.acks_late: 424 self.acknowledge() 425 self.send_event("task-started", uuid=self.task_id, pid=pid) 426 self.logger.debug("Task accepted: %s[%s] pid:%r", 427 self.task_name, self.task_id, pid) 428 if self._terminate_on_ack is not None: 429 _, pool, signal = self._terminate_on_ack 430 self.terminate(pool, signal) 431 432 def on_timeout(self, soft, timeout): 433 """Handler called if the task times out.""" 434 state.task_ready(self) 
435 if soft: 436 self.logger.warning("Soft time limit (%ss) exceeded for %s[%s]", 437 timeout, self.task_name, self.task_id) 438 exc = exceptions.SoftTimeLimitExceeded(timeout) 439 else: 440 self.logger.error("Hard time limit (%ss) exceeded for %s[%s]", 441 timeout, self.task_name, self.task_id) 442 exc = exceptions.TimeLimitExceeded(timeout) 443 444 if self._store_errors: 445 self.task.backend.mark_as_failure(self.task_id, exc) 446 447 def on_success(self, ret_value): 448 """Handler called if the task was successfully processed.""" 449 state.task_ready(self) 450 451 if self.task.acks_late: 452 self.acknowledge() 453 454 runtime = self.time_start and (time.time() - self.time_start) or 0 455 self.send_event("task-succeeded", uuid=self.task_id, 456 result=safe_repr(ret_value), runtime=runtime) 457 458 self.logger.info(self.success_msg.strip(), 459 {"id": self.task_id, 460 "name": self.task_name, 461 "return_value": self.repr_result(ret_value), 462 "runtime": runtime}) 463 464 def on_retry(self, exc_info): 465 """Handler called if the task should be retried.""" 466 self.send_event("task-retried", uuid=self.task_id, 467 exception=safe_repr(exc_info.exception.exc), 468 traceback=safe_str(exc_info.traceback)) 469 470 self.logger.info(self.retry_msg.strip(), 471 {"id": self.task_id, 472 "name": self.task_name, 473 "exc": safe_repr(exc_info.exception.exc)}, 474 exc_info=exc_info) 475 476 def on_failure(self, exc_info): 477 """Handler called if the task raised an exception.""" 478 state.task_ready(self) 479 480 if self.task.acks_late: 481 self.acknowledge() 482 483 if isinstance(exc_info.exception, exceptions.RetryTaskError): 484 return self.on_retry(exc_info) 485 486 # This is a special case as the process would not have had 487 # time to write the result. 
488 if isinstance(exc_info.exception, exceptions.WorkerLostError) and \ 489 self._store_errors: 490 self.task.backend.mark_as_failure(self.task_id, exc_info.exception) 491 492 self.send_event("task-failed", uuid=self.task_id, 493 exception=safe_repr(exc_info.exception), 494 traceback=safe_str(exc_info.traceback)) 495 496 context = {"hostname": self.hostname, 497 "id": self.task_id, 498 "name": self.task_name, 499 "exc": safe_repr(exc_info.exception), 500 "traceback": safe_str(exc_info.traceback), 501 "args": safe_repr(self.args), 502 "kwargs": safe_repr(self.kwargs)} 503 504 self.logger.error(self.error_msg.strip(), context, 505 exc_info=exc_info.exc_info, 506 extra={"data": {"id": self.task_id, 507 "name": self.task_name, 508 "hostname": self.hostname}}) 509 510 task_obj = registry.tasks.get(self.task_name, object) 511 task_obj.send_error_email(context, exc_info.exception) 512 513 def acknowledge(self): 514 """Acknowledge task.""" 515 if not self.acknowledged: 516 self.on_ack() 517 self.acknowledged = True 518 519 def repr_result(self, result, maxlen=46): 520 # 46 is the length needed to fit 521 # "the quick brown fox jumps over the lazy dog" :) 522 return truncate_text(safe_repr(result), maxlen) 523 524 def info(self, safe=False): 525 return {"id": self.task_id, 526 "name": self.task_name, 527 "args": self.args if safe else safe_repr(self.args), 528 "kwargs": self.kwargs if safe else safe_repr(self.kwargs), 529 "hostname": self.hostname, 530 "time_start": self.time_start, 531 "acknowledged": self.acknowledged, 532 "delivery_info": self.delivery_info, 533 "worker_pid": self.worker_pid} 534 535 def shortinfo(self): 536 return "%s[%s]%s%s" % ( 537 self.task_name, 538 self.task_id, 539 " eta:[%s]" % (self.eta, ) if self.eta else "", 540 " expires:[%s]" % (self.expires, ) if self.expires else "") 541 __str__ = shortinfo 542 543 def __repr__(self): 544 return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % ( 545 self.__class__.__name__, 546 self.task_name, 
self.task_id, self.args, self.kwargs) 547 548 def _get_tracer_args(self, loglevel=None, logfile=None): 549 """Get the :class:`WorkerTaskTrace` tracer for this task.""" 550 task_func_kwargs = self.extend_with_default_kwargs(loglevel, logfile) 551 return self.task_name, self.task_id, self.args, task_func_kwargs 552 [end of celery/worker/job.py] [start of celery/worker/mediator.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.mediator 4 ~~~~~~~~~~~~~~~~~~~~~~ 5 6 The mediator is an internal thread that moves tasks 7 from an internal :class:`Queue` to the worker pool. 8 9 This is only used if rate limits are enabled, as it moves 10 messages from the rate limited queue (which holds tasks 11 that are allowed to be processed) to the pool. Disabling 12 rate limits will also disable this machinery, 13 and can improve performance. 14 15 :copyright: (c) 2009 - 2011 by Ask Solem. 16 :license: BSD, see LICENSE for more details. 17 18 """ 19 from __future__ import absolute_import 20 21 import os 22 import sys 23 import threading 24 import traceback 25 26 from Queue import Empty 27 28 from ..app import app_or_default 29 30 31 class Mediator(threading.Thread): 32 33 #: The task queue, a :class:`~Queue.Queue` instance. 34 ready_queue = None 35 36 #: Callback called when a task is obtained. 
37 callback = None 38 39 def __init__(self, ready_queue, callback, logger=None, app=None): 40 threading.Thread.__init__(self) 41 self.app = app_or_default(app) 42 self.logger = logger or self.app.log.get_default_logger() 43 self.ready_queue = ready_queue 44 self.callback = callback 45 self._is_shutdown = threading.Event() 46 self._is_stopped = threading.Event() 47 self.setDaemon(True) 48 self.setName(self.__class__.__name__) 49 50 def move(self): 51 try: 52 task = self.ready_queue.get(timeout=1.0) 53 except Empty: 54 return 55 56 if task.revoked(): 57 return 58 59 self.logger.debug( 60 "Mediator: Running callback for task: %s[%s]" % ( 61 task.task_name, task.task_id)) 62 63 try: 64 self.callback(task) 65 except Exception, exc: 66 self.logger.error("Mediator callback raised exception %r\n%s", 67 exc, traceback.format_exc(), 68 exc_info=sys.exc_info(), 69 extra={"data": {"id": task.task_id, 70 "name": task.task_name, 71 "hostname": task.hostname}}) 72 73 def run(self): 74 """Move tasks until :meth:`stop` is called.""" 75 while not self._is_shutdown.isSet(): 76 try: 77 self.move() 78 except Exception, exc: 79 self.logger.error("Mediator crash: %r", exc, exc_info=True) 80 # exiting by normal means does not work here, so force exit. 81 os._exit(1) 82 self._is_stopped.set() 83 84 def stop(self): 85 """Gracefully shutdown the thread.""" 86 self._is_shutdown.set() 87 self._is_stopped.wait() 88 self.join(1e10) 89 [end of celery/worker/mediator.py] [start of celery/worker/state.py] 1 # -*- coding: utf-8 -*- 2 """ 3 celery.worker.state 4 ~~~~~~~~~~~~~~~~~~~ 5 6 Internal worker state (global) 7 8 This includes the currently active and reserved tasks, 9 statistics, and revoked tasks. 10 11 :copyright: (c) 2009 - 2011 by Ask Solem. 12 :license: BSD, see LICENSE for more details. 13 14 """ 15 from __future__ import absolute_import 16 17 import os 18 import platform 19 import shelve 20 21 from collections import defaultdict 22 23 from .. 
import __version__ 24 from ..datastructures import LimitedSet 25 from ..utils import cached_property 26 27 #: Worker software/platform information. 28 SOFTWARE_INFO = {"sw_ident": "celeryd", 29 "sw_ver": __version__, 30 "sw_sys": platform.system()} 31 32 #: maximum number of revokes to keep in memory. 33 REVOKES_MAX = 10000 34 35 #: how many seconds a revoke will be active before 36 #: being expired when the max limit has been exceeded. 37 REVOKE_EXPIRES = 3600 38 39 #: set of all reserved :class:`~celery.worker.job.TaskRequest`'s. 40 reserved_requests = set() 41 42 #: set of currently active :class:`~celery.worker.job.TaskRequest`'s. 43 active_requests = set() 44 45 #: count of tasks executed by the worker, sorted by type. 46 total_count = defaultdict(lambda: 0) 47 48 #: the list of currently revoked tasks. Persistent if statedb set. 49 revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) 50 51 52 def task_reserved(request): 53 """Updates global state when a task has been reserved.""" 54 reserved_requests.add(request) 55 56 57 def task_accepted(request): 58 """Updates global state when a task has been accepted.""" 59 active_requests.add(request) 60 total_count[request.task_name] += 1 61 62 63 def task_ready(request): 64 """Updates global state when a task is ready.""" 65 active_requests.discard(request) 66 reserved_requests.discard(request) 67 68 69 if os.environ.get("CELERY_BENCH"): # pragma: no cover 70 from time import time 71 72 all_count = 0 73 bench_start = None 74 bench_every = int(os.environ.get("CELERY_BENCH_EVERY", 1000)) 75 __reserved = task_reserved 76 __ready = task_ready 77 78 def task_reserved(request): # noqa 79 global bench_start 80 if bench_start is None: 81 bench_start = time() 82 return __reserved(request) 83 84 def task_ready(request): # noqa 85 global all_count, bench_start 86 all_count += 1 87 if not all_count % bench_every: 88 print("* Time spent processing %s tasks (since first " 89 "task received): ~%.4fs\n" % ( 90 
bench_every, time() - bench_start)) 91 bench_start = None 92 93 return __ready(request) 94 95 96 class Persistent(object): 97 storage = shelve 98 _is_open = False 99 100 def __init__(self, filename): 101 self.filename = filename 102 self._load() 103 104 def save(self): 105 self.sync(self.db) 106 self.db.sync() 107 self.close() 108 109 def merge(self, d): 110 revoked.update(d.get("revoked") or {}) 111 return d 112 113 def sync(self, d): 114 prev = d.get("revoked") or {} 115 prev.update(revoked.as_dict()) 116 d["revoked"] = prev 117 return d 118 119 def open(self): 120 return self.storage.open(self.filename, writeback=True) 121 122 def close(self): 123 if self._is_open: 124 self.db.close() 125 self._is_open = False 126 127 def _load(self): 128 self.merge(self.db) 129 130 @cached_property 131 def db(self): 132 self._is_open = True 133 return self.open() 134 [end of celery/worker/state.py] [start of funtests/bench/worker.py] 1 import time 2 3 from celery import Celery 4 5 celery = Celery() 6 celery.conf.update(BROKER_TRANSPORT="memory", 7 BROKER_POOL_LIMIT=1, 8 CELERY_PREFETCH_MULTIPLIER=0, 9 CELERY_DISABLE_RATE_LIMITS=True, 10 CELERY_BACKEND=None) 11 12 13 def bench_consumer(n=10000): 14 from celery.worker import WorkController 15 from celery.worker import state 16 17 worker = WorkController(app=celery, pool_cls="solo") 18 time_start = [None] 19 20 @celery.task() 21 def it(i): 22 if not i: 23 time_start[0] = time.time() 24 elif i == n - 1: 25 print(time.time() - time_start[0]) 26 27 @celery.task() 28 def shutdown_worker(): 29 raise SystemExit() 30 31 for i in xrange(n): 32 it.delay(i) 33 shutdown_worker.delay() 34 35 try: 36 worker.start() 37 except SystemExit: 38 assert sum(state.total_count.values()) == n + 1 39 40 41 if __name__ == "__main__": 42 bench_consumer() 43 [end of funtests/bench/worker.py] [start of pavement.py] 1 import sys 2 from paver.easy import * 3 from paver import doctools 4 from paver.setuputils import setup 5 6 PYCOMPILE_CACHES = ["*.pyc", 
"*$py.class"] 7 8 options( 9 sphinx=Bunch(builddir=".build"), 10 ) 11 12 13 def sphinx_builddir(options): 14 return path("docs") / options.sphinx.builddir / "html" 15 16 17 @task 18 def clean_docs(options): 19 sphinx_builddir(options).rmtree() 20 21 22 @task 23 @needs("clean_docs", "paver.doctools.html") 24 def html(options): 25 destdir = path("Documentation") 26 destdir.rmtree() 27 builtdocs = sphinx_builddir(options) 28 builtdocs.move(destdir) 29 30 31 @task 32 @needs("paver.doctools.html") 33 def qhtml(options): 34 destdir = path("Documentation") 35 builtdocs = sphinx_builddir(options) 36 sh("rsync -az %s/ %s" % (builtdocs, destdir)) 37 38 39 @task 40 @needs("clean_docs", "paver.doctools.html") 41 def ghdocs(options): 42 builtdocs = sphinx_builddir(options) 43 sh("git checkout gh-pages && \ 44 cp -r %s/* . && \ 45 git commit . -m 'Rendered documentation for Github Pages.' && \ 46 git push origin gh-pages && \ 47 git checkout master" % builtdocs) 48 49 50 @task 51 @needs("clean_docs", "paver.doctools.html") 52 def upload_pypi_docs(options): 53 builtdocs = path("docs") / options.builddir / "html" 54 sh("%s setup.py upload_sphinx --upload-dir='%s'" % ( 55 sys.executable, builtdocs)) 56 57 58 @task 59 @needs("upload_pypi_docs", "ghdocs") 60 def upload_docs(options): 61 pass 62 63 64 @task 65 def autodoc(options): 66 sh("contrib/release/doc4allmods celery") 67 68 69 @task 70 def verifyindex(options): 71 sh("contrib/release/verify-reference-index.sh") 72 73 74 @task 75 def verifyconfigref(options): 76 sh("PYTHONPATH=. %s contrib/release/verify_config_reference.py \ 77 docs/configuration.rst" % (sys.executable, )) 78 79 80 @task 81 @cmdopts([ 82 ("noerror", "E", "Ignore errors"), 83 ]) 84 def flake8(options): 85 noerror = getattr(options, "noerror", False) 86 complexity = getattr(options, "complexity", 22) 87 sh("""flake8 celery | perl -mstrict -mwarnings -nle' 88 my $ignore = m/too complex \((\d+)\)/ && $1 le %s; 89 if (! 
$ignore) { print STDERR; our $FOUND_FLAKE = 1 } 90 }{exit $FOUND_FLAKE; 91 '""" % (complexity, ), ignore_error=noerror) 92 93 94 @task 95 @cmdopts([ 96 ("noerror", "E", "Ignore errors"), 97 ]) 98 def flakeplus(options): 99 noerror = getattr(options, "noerror", False) 100 sh("python contrib/release/flakeplus.py celery", 101 ignore_error=noerror) 102 103 104 @task 105 @cmdopts([ 106 ("noerror", "E", "Ignore errors") 107 ]) 108 def flakes(options): 109 flake8(options) 110 flakeplus(options) 111 112 113 @task 114 def clean_readme(options): 115 path("README").unlink() 116 path("README.rst").unlink() 117 118 119 @task 120 @needs("clean_readme") 121 def readme(options): 122 sh("%s contrib/release/sphinx-to-rst.py docs/templates/readme.txt \ 123 > README.rst" % (sys.executable, )) 124 sh("ln -sf README.rst README") 125 126 127 @task 128 def bump(options): 129 sh("bump -c celery") 130 131 132 @task 133 @cmdopts([ 134 ("coverage", "c", "Enable coverage"), 135 ("quick", "q", "Quick test"), 136 ("verbose", "V", "Make more noise"), 137 ]) 138 def test(options): 139 cmd = "CELERY_LOADER=default nosetests" 140 if getattr(options, "coverage", False): 141 cmd += " --with-coverage3" 142 if getattr(options, "quick", False): 143 cmd = "QUICKTEST=1 SKIP_RLIMITS=1 %s" % cmd 144 if getattr(options, "verbose", False): 145 cmd += " --verbosity=2" 146 sh(cmd) 147 148 149 @task 150 @cmdopts([ 151 ("noerror", "E", "Ignore errors"), 152 ]) 153 def pep8(options): 154 noerror = getattr(options, "noerror", False) 155 return sh("""find . -name "*.py" | xargs pep8 | perl -nle'\ 156 print; $a=1 if $_}{exit($a)'""", ignore_error=noerror) 157 158 159 @task 160 def removepyc(options): 161 sh("find . 
-type f -a \\( %s \\) | xargs rm" % ( 162 " -o ".join("-name '%s'" % (pat, ) for pat in PYCOMPILE_CACHES), )) 163 164 165 @task 166 @needs("removepyc") 167 def gitclean(options): 168 sh("git clean -xdn") 169 170 171 @task 172 @needs("removepyc") 173 def gitcleanforce(options): 174 sh("git clean -xdf") 175 176 177 @task 178 @needs("flakes", "autodoc", "verifyindex", 179 "verifyconfigref", "test", "gitclean") 180 def releaseok(options): 181 pass 182 183 184 @task 185 @needs("releaseok", "removepyc", "upload_docs") 186 def release(options): 187 pass 188 189 190 @task 191 def coreloc(options): 192 sh("xargs sloccount < contrib/release/core-modules.txt") 193 194 195 @task 196 def testloc(options): 197 sh("sloccount celery/tests") 198 199 200 @task 201 def loc(options): 202 sh("sloccount celery") 203 [end of pavement.py] [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 import os 4 import sys 5 import codecs 6 import platform 7 8 if sys.version_info < (2, 5): 9 raise Exception("Celery requires Python 2.5 or higher.") 10 11 try: 12 from setuptools import setup, find_packages 13 from setuptools.command.test import test 14 except ImportError: 15 raise 16 from ez_setup import use_setuptools 17 use_setuptools() 18 from setuptools import setup, find_packages # noqa 19 from setuptools.command.test import test # noqa 20 21 NAME = "celery" 22 entrypoints = {} 23 extra = {} 24 25 # -*- Classifiers -*- 26 27 classes = """ 28 Development Status :: 5 - Production/Stable 29 License :: OSI Approved :: BSD License 30 Topic :: System :: Distributed Computing 31 Topic :: Software Development :: Object Brokering 32 Intended Audience :: Developers 33 Intended Audience :: Information Technology 34 Intended Audience :: Science/Research 35 Intended Audience :: Financial and Insurance Industry 36 Intended Audience :: Healthcare Industry 37 Environment :: No Input/Output (Daemon) 38 Environment :: Console 39 Programming Language :: Python 40 Programming Language :: Python 
:: 2 41 Programming Language :: Python :: 2.5 42 Programming Language :: Python :: 2.6 43 Programming Language :: Python :: 2.7 44 Programming Language :: Python :: 3 45 Programming Language :: Python :: 3.2 46 Programming Language :: Python :: Implementation :: CPython 47 Programming Language :: Python :: Implementation :: PyPy 48 Programming Language :: Python :: Implementation :: Jython 49 Operating System :: OS Independent 50 Operating System :: POSIX 51 Operating System :: Microsoft :: Windows 52 Operating System :: MacOS :: MacOS X 53 """ 54 classifiers = [s.strip() for s in classes.split('\n') if s] 55 56 # -*- Python 3 -*- 57 is_py3k = sys.version_info >= (3, 0) 58 if is_py3k: 59 extra.update(use_2to3=True) 60 61 # -*- Distribution Meta -*- 62 63 os.environ["CELERY_NO_EVAL"] = "yes" 64 import celery as distmeta 65 os.environ.pop("CELERY_NO_EVAL", None) 66 sys.modules.pop("celery", None) 67 68 # -*- Custom Commands -*- 69 70 class quicktest(test): 71 extra_env = dict(SKIP_RLIMITS=1, QUICKTEST=1) 72 73 def run(self, *args, **kwargs): 74 for env_name, env_value in self.extra_env.items(): 75 os.environ[env_name] = str(env_value) 76 test.run(self, *args, **kwargs) 77 78 # -*- Installation Dependencies -*- 79 80 install_requires = [] 81 try: 82 import importlib # noqa 83 except ImportError: 84 install_requires.append("importlib") 85 install_requires.extend([ 86 "anyjson>=0.3.1", 87 "kombu>=1.4.3,<3.0.0", 88 ]) 89 if is_py3k: 90 install_requires.append("python-dateutil>=2.0.0") 91 else: 92 install_requires.append("python-dateutil>=1.5.0,<2.0.0") 93 94 py_version = sys.version_info 95 is_jython = sys.platform.startswith("java") 96 is_pypy = hasattr(sys, "pypy_version_info") 97 if sys.version_info < (2, 7): 98 install_requires.append("ordereddict") # Replacement for the ordered dict 99 if sys.version_info < (2, 6) and not (is_jython or is_pypy): 100 install_requires.append("multiprocessing") 101 102 if is_jython: 103 install_requires.append("threadpool") 104 
install_requires.append("simplejson") 105 106 # -*- Tests Requires -*- 107 108 tests_require = ["nose", "nose-cover3", "sqlalchemy", "mock"] 109 if sys.version_info < (2, 7): 110 tests_require.append("unittest2") 111 elif sys.version_info <= (2, 5): 112 tests_require.append("simplejson") 113 114 # -*- Long Description -*- 115 116 if os.path.exists("README.rst"): 117 long_description = codecs.open("README.rst", "r", "utf-8").read() 118 else: 119 long_description = "See http://pypi.python.org/pypi/celery" 120 121 # -*- Entry Points -*- # 122 123 console_scripts = entrypoints["console_scripts"] = [ 124 'celerybeat = celery.bin.celerybeat:main', 125 'camqadm = celery.bin.camqadm:main', 126 'celeryev = celery.bin.celeryev:main', 127 'celeryctl = celery.bin.celeryctl:main', 128 'celeryd-multi = celery.bin.celeryd_multi:main', 129 ] 130 if platform.system() == "Windows": 131 console_scripts.append('celeryd = celery.bin.celeryd:windows_main') 132 else: 133 console_scripts.append('celeryd = celery.bin.celeryd:main') 134 135 # bundles: Only relevant for Celery developers. 136 entrypoints["bundle.bundles"] = ["celery = celery.contrib.bundles:bundles"] 137 138 # -*- %%% -*- 139 140 setup( 141 name="celery", 142 version=distmeta.__version__, 143 description=distmeta.__doc__, 144 author=distmeta.__author__, 145 author_email=distmeta.__contact__, 146 url=distmeta.__homepage__, 147 platforms=["any"], 148 license="BSD", 149 packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']), 150 zip_safe=False, 151 install_requires=install_requires, 152 tests_require=tests_require, 153 test_suite="nose.collector", 154 cmdclass={"quicktest": quicktest}, 155 classifiers=classifiers, 156 entry_points=entrypoints, 157 long_description=long_description, 158 **extra) 159 [end of setup.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. 
A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
celery/celery
9998b55af267446a077b31fdf35806c59b943b2d
Introduce a CELERYCTL variable in /etc/init.d/celeryd and /etc/default/celeryd.

I ran into a problem with '/etc/init.d/celeryd status' not finding celeryctl when using django-celery in a virtualenv. These changes replace the hard-coded 'celeryctl' reference with a $CELERYCTL variable, so that /etc/default/celeryd can be updated with the location of celeryctl, similar to the way CELERYD_MULTI is handled.
2011-12-12T12:49:09Z
<patch> diff --git a/celery/__init__.py b/celery/__init__.py --- a/celery/__init__.py +++ b/celery/__init__.py @@ -5,32 +5,31 @@ from __future__ import absolute_import -import os -import sys - -VERSION = (2, 4, 5) - +VERSION = (2, 5, 0, "b1") __version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:]) __author__ = "Ask Solem" __contact__ = "ask@celeryproject.org" __homepage__ = "http://celeryproject.org" __docformat__ = "restructuredtext" +# -eof meta- + +import sys + if sys.version_info < (2, 5): raise Exception( "Python 2.4 is not supported by this version. " "Please use Celery versions 2.1.x or earlier.") +from .local import Proxy + def Celery(*args, **kwargs): from .app import App return App(*args, **kwargs) -if not os.environ.get("CELERY_NO_EVAL", False): - from .local import Proxy - - def _get_current_app(): - from .app import current_app - return current_app() - current_app = Proxy(_get_current_app) +def _get_current_app(): + from .app import current_app + return current_app() +current_app = Proxy(_get_current_app) diff --git a/celery/actors.py b/celery/actors.py new file mode 100644 --- /dev/null +++ b/celery/actors.py @@ -0,0 +1,30 @@ +from __future__ import absolute_import + +from celery.app import app_or_default + +import cl +import cl.presence + + +def construct(cls, instance, connection=None, *args, **kwargs): + app = instance.app = app_or_default(kwargs.pop("app", None)) + super(cls, instance).__init__(connection or app.broker_connection(), + *args, **kwargs) + + +class Actor(cl.Actor): + + def __init__(self, *args, **kwargs): + construct(Actor, self, *args, **kwargs) + + +class Agent(cl.Agent): + + def __init__(self, *args, **kwargs): + construct(Agent, self, *args, **kwargs) + + +class AwareAgent(cl.presence.AwareAgent): + + def __init__(self, *args, **kwargs): + construct(AwareAgent, self, *args, **kwargs) diff --git a/celery/app/__init__.py b/celery/app/__init__.py --- a/celery/app/__init__.py +++ b/celery/app/__init__.py @@ -15,12 
+15,10 @@ import os import threading -from functools import wraps -from inspect import getargspec - from .. import registry from ..utils import cached_property, instantiate +from . import annotations from . import base # Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute @@ -114,11 +112,14 @@ class Task(BaseTask): def Worker(self, **kwargs): """Create new :class:`~celery.apps.worker.Worker` instance.""" - return instantiate("celery.apps.worker.Worker", app=self, **kwargs) + return instantiate("celery.apps.worker:Worker", app=self, **kwargs) + + def WorkController(self, **kwargs): + return instantiate("celery.worker:WorkController", app=self, **kwargs) def Beat(self, **kwargs): """Create new :class:`~celery.apps.beat.Beat` instance.""" - return instantiate("celery.apps.beat.Beat", app=self, **kwargs) + return instantiate("celery.apps.beat:Beat", app=self, **kwargs) def TaskSet(self, *args, **kwargs): """Create new :class:`~celery.task.sets.TaskSet`.""" @@ -166,23 +167,14 @@ def refresh_feed(url, **kwargs): def inner_create_task_cls(**options): def _create_task_cls(fun): - options["app"] = self - options.setdefault("accept_magic_kwargs", False) base = options.pop("base", None) or self.Task - @wraps(fun, assigned=("__module__", "__name__")) - def run(self, *args, **kwargs): - return fun(*args, **kwargs) - - # Save the argspec for this task so we can recognize - # which default task kwargs we're going to pass to it later. - # (this happens in celery.utils.fun_takes_kwargs) - run.argspec = getargspec(fun) - - cls_dict = dict(options, run=run, - __module__=fun.__module__, - __doc__=fun.__doc__) - T = type(fun.__name__, (base, ), cls_dict)() + T = type(fun.__name__, (base, ), dict({ + "app": self, + "accept_magic_kwargs": False, + "run": staticmethod(fun), + "__doc__": fun.__doc__, + "__module__": fun.__module__}, **options))() return registry.tasks[T.name] # global instance. 
return _create_task_cls @@ -191,11 +183,24 @@ def run(self, *args, **kwargs): return inner_create_task_cls(**options)(*args) return inner_create_task_cls(**options) + def annotate_task(self, task): + if self.annotations: + match = annotations._first_match(self.annotations, task) + for attr, value in (match or {}).iteritems(): + setattr(task, attr, value) + match_any = annotations._first_match_any(self.annotations) + for attr, value in (match_any or {}).iteritems(): + setattr(task, attr, value) + @cached_property def Task(self): """Default Task base class for this application.""" return self.create_task_cls() + @cached_property + def annotations(self): + return annotations.prepare(self.conf.CELERY_ANNOTATIONS) + def __repr__(self): return "<Celery: %s:0x%x>" % (self.main or "__main__", id(self), ) diff --git a/celery/app/amqp.py b/celery/app/amqp.py --- a/celery/app/amqp.py +++ b/celery/app/amqp.py @@ -145,7 +145,7 @@ def with_defaults(cls, queues, default_exchange, default_exchange_type): class TaskPublisher(messaging.Publisher): - auto_declare = True + auto_declare = False retry = False retry_policy = None @@ -154,6 +154,7 @@ def __init__(self, *args, **kwargs): self.retry = kwargs.pop("retry", self.retry) self.retry_policy = kwargs.pop("retry_policy", self.retry_policy or {}) + self.utc = kwargs.pop("enable_utc", False) super(TaskPublisher, self).__init__(*args, **kwargs) def declare(self): @@ -208,10 +209,10 @@ def delay_task(self, task_name, task_args=None, task_kwargs=None, if not isinstance(task_kwargs, dict): raise ValueError("task kwargs must be a dictionary") if countdown: # Convert countdown to ETA. 
- now = now or datetime.now() + now = now or datetime.utcnow() eta = now + timedelta(seconds=countdown) if isinstance(expires, int): - now = now or datetime.now() + now = now or datetime.utcnow() expires = now + timedelta(seconds=expires) eta = eta and eta.isoformat() expires = expires and expires.isoformat() @@ -222,8 +223,8 @@ def delay_task(self, task_name, task_args=None, task_kwargs=None, "kwargs": task_kwargs or {}, "retries": retries or 0, "eta": eta, - "expires": expires} - + "expires": expires, + "utc": self.utc} if taskset_id: body["taskset"] = taskset_id if chord: @@ -321,6 +322,7 @@ def TaskPublisher(self, *args, **kwargs): "serializer": conf.CELERY_TASK_SERIALIZER, "retry": conf.CELERY_TASK_PUBLISH_RETRY, "retry_policy": conf.CELERY_TASK_PUBLISH_RETRY_POLICY, + "enable_utc": conf.CELERY_ENABLE_UTC, "app": self} return TaskPublisher(*args, **self.app.merge(defaults, kwargs)) diff --git a/celery/app/annotations.py b/celery/app/annotations.py new file mode 100644 --- /dev/null +++ b/celery/app/annotations.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import + +from ..utils import firstmethod, instantiate, mpromise + +_first_match = firstmethod("annotate") +_first_match_any = firstmethod("annotate_any") + + +class MapAnnotation(dict): + + def annotate_any(self): + try: + return dict(self["*"]) + except KeyError: + pass + + def annotate(self, task): + try: + return dict(self[task.name]) + except KeyError: + pass + + +def prepare(annotations): + """Expands the :setting:`CELERY_ANNOTATIONS` setting.""" + + def expand_annotation(annotation): + if isinstance(annotation, dict): + return MapAnnotation(annotation) + elif isinstance(annotation, basestring): + return mpromise(instantiate, annotation) + return annotation + + if annotations is None: + return () + elif not isinstance(annotations, (list, tuple)): + annotations = (annotations, ) + return map(expand_annotation, annotations) diff --git a/celery/app/base.py b/celery/app/base.py --- 
a/celery/app/base.py +++ b/celery/app/base.py @@ -18,7 +18,8 @@ from contextlib import contextmanager from copy import deepcopy from functools import wraps -from threading import Lock + +from kombu.clocks import LamportClock from .. import datastructures from .. import platforms @@ -37,56 +38,6 @@ """ -class LamportClock(object): - """Lamport's logical clock. - - From Wikipedia: - - "A Lamport logical clock is a monotonically incrementing software counter - maintained in each process. It follows some simple rules: - - * A process increments its counter before each event in that process; - * When a process sends a message, it includes its counter value with - the message; - * On receiving a message, the receiver process sets its counter to be - greater than the maximum of its own value and the received value - before it considers the message received. - - Conceptually, this logical clock can be thought of as a clock that only - has meaning in relation to messages moving between processes. When a - process receives a message, it resynchronizes its logical clock with - the sender. - - .. seealso:: - - http://en.wikipedia.org/wiki/Lamport_timestamps - http://en.wikipedia.org/wiki/Lamport's_Distributed_ - Mutual_Exclusion_Algorithm - - *Usage* - - When sending a message use :meth:`forward` to increment the clock, - when receiving a message use :meth:`adjust` to sync with - the time stamp of the incoming message. - - """ - #: The clocks current value. 
- value = 0 - - def __init__(self, initial_value=0): - self.value = initial_value - self.mutex = Lock() - - def adjust(self, other): - with self.mutex: - self.value = max(self.value, other) + 1 - - def forward(self): - with self.mutex: - self.value += 1 - return self.value - - class Settings(datastructures.ConfigurationView): @property @@ -121,12 +72,12 @@ class BaseApp(object): IS_OSX = platforms.IS_OSX IS_WINDOWS = platforms.IS_WINDOWS - amqp_cls = "celery.app.amqp.AMQP" + amqp_cls = "celery.app.amqp:AMQP" backend_cls = None - events_cls = "celery.events.Events" - loader_cls = "celery.loaders.app.AppLoader" - log_cls = "celery.log.Logging" - control_cls = "celery.task.control.Control" + events_cls = "celery.events:Events" + loader_cls = "celery.loaders.app:AppLoader" + log_cls = "celery.log:Logging" + control_cls = "celery.task.control:Control" _pool = None @@ -322,6 +273,11 @@ def mail_admins(self, subject, body, fail_silently=False): use_ssl=self.conf.EMAIL_USE_SSL, use_tls=self.conf.EMAIL_USE_TLS) + def select_queues(self, queues=None): + if queues: + return self.amqp.queues.select_subset(queues, + self.conf.CELERY_CREATE_MISSING_QUEUES) + def either(self, default_key, *values): """Fallback to the value of a configuration key if none of the `*values` are true.""" diff --git a/celery/app/defaults.py b/celery/app/defaults.py --- a/celery/app/defaults.py +++ b/celery/app/defaults.py @@ -73,7 +73,7 @@ def to_python(self, value): "CONNECTION_TIMEOUT": Option(4, type="int"), "CONNECTION_RETRY": Option(True, type="bool"), "CONNECTION_MAX_RETRIES": Option(100, type="int"), - "POOL_LIMIT": Option(None, type="int"), + "POOL_LIMIT": Option(10, type="int"), "INSIST": Option(False, type="bool", deprecate_by="2.4", remove_by="3.0"), "USE_SSL": Option(False, type="bool"), @@ -88,6 +88,7 @@ def to_python(self, value): alt="CELERY_TASK_RESULT_EXPIRES"), "AMQP_TASK_RESULT_CONNECTION_MAX": Option(1, type="int", remove_by="2.5", alt="BROKER_POOL_LIMIT"), + "ANNOTATIONS": 
Option(type="any"), "BROADCAST_QUEUE": Option("celeryctl"), "BROADCAST_EXCHANGE": Option("celeryctl"), "BROADCAST_EXCHANGE_TYPE": Option("fanout"), @@ -102,6 +103,7 @@ def to_python(self, value): "DEFAULT_EXCHANGE_TYPE": Option("direct"), "DEFAULT_DELIVERY_MODE": Option(2, type="string"), "EAGER_PROPAGATES_EXCEPTIONS": Option(False, type="bool"), + "ENABLE_UTC": Option(False, type="bool"), "EVENT_SERIALIZER": Option("json"), "IMPORTS": Option((), type="tuple"), "IGNORE_RESULT": Option(False, type="bool"), @@ -112,6 +114,7 @@ def to_python(self, value): "REDIS_PORT": Option(None, type="int"), "REDIS_DB": Option(None, type="int"), "REDIS_PASSWORD": Option(None, type="string"), + "REDIS_MAX_CONNECTIONS": Option(None, type="int"), "RESULT_BACKEND": Option(None, type="string"), "RESULT_DB_SHORT_LIVED_SESSIONS": Option(False, type="bool"), "RESULT_DBURI": Option(), @@ -135,15 +138,19 @@ def to_python(self, value): "interval_step": 0.2}, type="dict"), "TASK_RESULT_EXPIRES": Option(timedelta(days=1), type="int"), "TASK_SERIALIZER": Option("pickle"), + "TIMEZONE": Option(None, type="string"), "TRACK_STARTED": Option(False, type="bool"), "REDIRECT_STDOUTS": Option(True, type="bool"), "REDIRECT_STDOUTS_LEVEL": Option("WARNING"), "QUEUES": Option(None, type="dict"), + "SECURITY_KEY": Option(None, type="string"), + "SECURITY_CERTIFICATE": Option(None, type="string"), + "SECURITY_CERT_STORE": Option(None, type="string"), }, "CELERYD": { "AUTOSCALER": Option("celery.worker.autoscale.Autoscaler"), "CONCURRENCY": Option(0, type="int"), - "ETA_SCHEDULER": Option(None, type="str"), + "ETA_SCHEDULER": Option(None, type="string"), "ETA_SCHEDULER_PRECISION": Option(1.0, type="float"), "HIJACK_ROOT_LOGGER": Option(True, type="bool"), "CONSUMER": Option("celery.worker.consumer.Consumer"), diff --git a/celery/app/task/__init__.py b/celery/app/task/__init__.py --- a/celery/app/task/__init__.py +++ b/celery/app/task/__init__.py @@ -15,12 +15,13 @@ import sys import threading +from ... 
import states from ...datastructures import ExceptionInfo from ...exceptions import MaxRetriesExceededError, RetryTaskError -from ...execute.trace import TaskTrace +from ...execute.trace import eager_trace_task from ...registry import tasks, _unpickle_task from ...result import EagerResult -from ...utils import fun_takes_kwargs, mattrgetter, uuid +from ...utils import fun_takes_kwargs, instantiate, mattrgetter, uuid from ...utils.mail import ErrorMail extract_exec_options = mattrgetter("queue", "routing_key", @@ -57,6 +58,9 @@ def get(self, key, default=None): except AttributeError: return default + def __repr__(self): + return "<Context: %r>" % (vars(self, )) + class TaskType(type): """Meta class for tasks. @@ -73,11 +77,15 @@ def __new__(cls, name, bases, attrs): new = super(TaskType, cls).__new__ task_module = attrs.get("__module__") or "__main__" - # Abstract class: abstract attribute should not be inherited. + if "__call__" in attrs: + # see note about __call__ below. + attrs["__defines_call__"] = True + + # - Abstract class: abstract attribute should not be inherited. if attrs.pop("abstract", None) or not attrs.get("autoregister", True): return new(cls, name, bases, attrs) - # Automatically generate missing/empty name. + # - Automatically generate missing/empty name. autoname = False if not attrs.get("name"): try: @@ -88,6 +96,23 @@ def __new__(cls, name, bases, attrs): attrs["name"] = '.'.join([module_name, name]) autoname = True + # - Automatically generate __call__. + # If this or none of its bases define __call__, we simply + # alias it to the ``run`` method, as + # this means we can skip a stacktrace frame :) + if not (attrs.get("__call__") + or any(getattr(b, "__defines_call__", False) for b in bases)): + try: + attrs["__call__"] = attrs["run"] + except KeyError: + + # the class does not yet define run, + # so we can't optimize this case. 
+ def __call__(self, *args, **kwargs): + return self.run(*args, **kwargs) + attrs["__call__"] = __call__ + + # - Create and register class. # Because of the way import happens (recursively) # we may or may not be the first time the task tries to register # with the framework. There should only be one class for each task @@ -99,6 +124,9 @@ def __new__(cls, name, bases, attrs): task_name = task_cls.name = '.'.join([task_cls.app.main, name]) tasks.register(task_cls) task = tasks[task_name].__class__ + + # decorate with annotations from config. + task.app.annotate_task(task) return task def __repr__(cls): @@ -114,6 +142,7 @@ class BaseTask(object): """ __metaclass__ = TaskType + __tracer__ = None ErrorMail = ErrorMail MaxRetriesExceededError = MaxRetriesExceededError @@ -243,8 +272,8 @@ class BaseTask(object): #: The type of task *(no longer used)*. type = "regular" - def __call__(self, *args, **kwargs): - return self.run(*args, **kwargs) + #: Execution strategy used, or the qualified name of one. 
+ Strategy = "celery.worker.strategy:default" def __reduce__(self): return (_unpickle_task, (self.name, ), None) @@ -253,6 +282,9 @@ def run(self, *args, **kwargs): """The body of the task executed by workers.""" raise NotImplementedError("Tasks must define the run method.") + def start_strategy(self, app, consumer): + return instantiate(self.Strategy, self, app, consumer) + @classmethod def get_logger(self, loglevel=None, logfile=None, propagate=False, **kwargs): @@ -530,9 +562,11 @@ def retry(self, args=None, kwargs=None, exc=None, throw=True, "eta": eta}) if max_retries is not None and options["retries"] > max_retries: - raise exc or self.MaxRetriesExceededError( - "Can't retry %s[%s] args:%s kwargs:%s" % ( - self.name, options["task_id"], args, kwargs)) + if exc: + raise + raise self.MaxRetriesExceededError( + "Can't retry %s[%s] args:%s kwargs:%s" % ( + self.name, options["task_id"], args, kwargs)) # If task was executed eagerly using apply(), # then the retry must also be executed eagerly. @@ -588,13 +622,14 @@ def apply(self, args=None, kwargs=None, **options): if key in supported_keys) kwargs.update(extend_with) - trace = TaskTrace(task.name, task_id, args, kwargs, - task=task, request=request, propagate=throw) - retval = trace.execute() + retval, info = eager_trace_task(task, task_id, args, kwargs, + request=request, propagate=throw) if isinstance(retval, ExceptionInfo): retval = retval.exception - return EagerResult(task_id, retval, trace.status, - traceback=trace.strtb) + state, tb = states.SUCCESS, '' + if info is not None: + state, tb = info.state, info.strtb + return EagerResult(task_id, retval, state, traceback=tb) @classmethod def AsyncResult(self, task_id): @@ -652,8 +687,7 @@ def after_return(self, status, retval, task_id, args, kwargs, einfo): The return value of this handler is ignored. """ - if self.request.chord: - self.backend.on_chord_part_return(self) + pass def on_failure(self, exc, task_id, args, kwargs, einfo): """Error handler. 
diff --git a/celery/apps/beat.py b/celery/apps/beat.py --- a/celery/apps/beat.py +++ b/celery/apps/beat.py @@ -9,7 +9,7 @@ from .. import __version__, platforms from .. import beat from ..app import app_or_default -from ..utils import get_full_cls_name, LOG_LEVELS +from ..utils import LOG_LEVELS, qualname from ..utils.timeutils import humanize_seconds STARTUP_INFO_FMT = """ @@ -104,8 +104,8 @@ def startup_info(self, beat): "conninfo": self.app.broker_connection().as_uri(), "logfile": self.logfile or "[stderr]", "loglevel": LOG_LEVELS[self.loglevel], - "loader": get_full_cls_name(self.app.loader.__class__), - "scheduler": get_full_cls_name(scheduler.__class__), + "loader": qualname(self.app.loader), + "scheduler": qualname(scheduler), "scheduler_info": scheduler.info, "hmax_interval": humanize_seconds(beat.max_interval), "max_interval": beat.max_interval, diff --git a/celery/apps/worker.py b/celery/apps/worker.py --- a/celery/apps/worker.py +++ b/celery/apps/worker.py @@ -15,7 +15,7 @@ from .. import __version__, platforms, signals from ..app import app_or_default from ..exceptions import ImproperlyConfigured, SystemTerminate -from ..utils import get_full_cls_name, isatty, LOG_LEVELS, cry +from ..utils import isatty, LOG_LEVELS, cry, qualname from ..worker import WorkController try: @@ -45,6 +45,14 @@ %(tasks)s """ +UNKNOWN_QUEUE_ERROR = """\ +Trying to select queue subset of %r, but queue %s is not +defined in the CELERY_QUEUES setting. + +If you want to automatically declare unknown queues you can +enable the CELERY_CREATE_MISSING_QUEUES setting. +""" + def cpu_count(): if multiprocessing is not None: @@ -153,18 +161,11 @@ def on_consumer_ready(self, consumer): print("celery@%s has started." 
% self.hostname) def init_queues(self): - if self.use_queues: - create_missing = self.app.conf.CELERY_CREATE_MISSING_QUEUES - try: - self.app.amqp.queues.select_subset(self.use_queues, - create_missing) - except KeyError, exc: - raise ImproperlyConfigured( - "Trying to select queue subset of %r, but queue %s" - "is not defined in CELERY_QUEUES. If you want to " - "automatically declare unknown queues you have to " - "enable CELERY_CREATE_MISSING_QUEUES" % ( - self.use_queues, exc)) + try: + self.app.select_queues(self.use_queues) + except KeyError, exc: + raise ImproperlyConfigured( + UNKNOWN_QUEUE_ERROR % (self.use_queues, exc)) def init_loader(self): self.loader = self.app.loader @@ -221,7 +222,7 @@ def startup_info(self): "logfile": self.logfile or "[stderr]", "celerybeat": "ON" if self.run_clockservice else "OFF", "events": "ON" if self.events else "OFF", - "loader": get_full_cls_name(self.loader.__class__), + "loader": qualname(self.loader), "queues": app.amqp.queues.format(indent=18, indent_first=False), } diff --git a/celery/backends/__init__.py b/celery/backends/__init__.py --- a/celery/backends/__init__.py +++ b/celery/backends/__init__.py @@ -7,14 +7,14 @@ from ..utils.functional import memoize BACKEND_ALIASES = { - "amqp": "celery.backends.amqp.AMQPBackend", - "cache": "celery.backends.cache.CacheBackend", - "redis": "celery.backends.redis.RedisBackend", - "mongodb": "celery.backends.mongodb.MongoBackend", - "tyrant": "celery.backends.tyrant.TyrantBackend", - "database": "celery.backends.database.DatabaseBackend", - "cassandra": "celery.backends.cassandra.CassandraBackend", - "disabled": "celery.backends.base.DisabledBackend", + "amqp": "celery.backends.amqp:AMQPBackend", + "cache": "celery.backends.cache:CacheBackend", + "redis": "celery.backends.redis:RedisBackend", + "mongodb": "celery.backends.mongodb:MongoBackend", + "tyrant": "celery.backends.tyrant:TyrantBackend", + "database": "celery.backends.database:DatabaseBackend", + "cassandra": 
"celery.backends.cassandra:CassandraBackend", + "disabled": "celery.backends.base:DisabledBackend", } diff --git a/celery/backends/amqp.py b/celery/backends/amqp.py --- a/celery/backends/amqp.py +++ b/celery/backends/amqp.py @@ -37,6 +37,8 @@ class AMQPBackend(BaseDictBackend): BacklogLimitExceeded = BacklogLimitExceeded + supports_native_join = True + def __init__(self, connection=None, exchange=None, exchange_type=None, persistent=None, serializer=None, auto_delete=True, **kwargs): @@ -53,7 +55,7 @@ def __init__(self, connection=None, exchange=None, exchange_type=None, type=exchange_type, delivery_mode=delivery_mode, durable=self.persistent, - auto_delete=auto_delete) + auto_delete=False) self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER self.auto_delete = auto_delete diff --git a/celery/backends/base.py b/celery/backends/base.py --- a/celery/backends/base.py +++ b/celery/backends/base.py @@ -13,7 +13,7 @@ from ..datastructures import LRUCache from ..exceptions import TimeoutError, TaskRevokedError from ..utils import timeutils -from ..utils.encoding import from_utf8 +from ..utils.encoding import ensure_bytes, from_utf8 from ..utils.serialization import (get_pickled_exception, get_pickleable_exception, create_exception_cls) @@ -40,6 +40,9 @@ class BaseBackend(object): #: argument which is for each pass. subpolling_interval = None + #: If true the backend must implement :meth:`get_many`. 
+ supports_native_join = False + def __init__(self, *args, **kwargs): from ..app import app_or_default self.app = app_or_default(kwargs.get("app")) @@ -200,11 +203,12 @@ def reload_taskset_result(self, task_id): raise NotImplementedError( "reload_taskset_result is not supported by this backend.") - def on_chord_part_return(self, task): + def on_chord_part_return(self, task, propagate=False): pass - def on_chord_apply(self, setid, body, *args, **kwargs): + def on_chord_apply(self, setid, body, result=None, **kwargs): from ..registry import tasks + kwargs["result"] = [r.task_id for r in result] tasks["celery.chord_unlock"].apply_async((setid, body, ), kwargs, countdown=1) @@ -297,6 +301,7 @@ def delete_taskset(self, taskset_id): class KeyValueStoreBackend(BaseDictBackend): task_keyprefix = "celery-task-meta-" taskset_keyprefix = "celery-taskset-meta-" + chord_keyprefix = "chord-unlock-" def get(self, key): raise NotImplementedError("Must implement the get method.") @@ -312,11 +317,15 @@ def delete(self, key): def get_key_for_task(self, task_id): """Get the cache key for a task by id.""" - return self.task_keyprefix + task_id + return ensure_bytes(self.task_keyprefix) + ensure_bytes(task_id) def get_key_for_taskset(self, taskset_id): - """Get the cache key for a task by id.""" - return self.taskset_keyprefix + taskset_id + """Get the cache key for a taskset by id.""" + return ensure_bytes(self.taskset_keyprefix) + ensure_bytes(taskset_id) + + def get_key_for_chord(self, taskset_id): + """Get the cache key for the chord waiting on taskset with given id.""" + return ensure_bytes(self.chord_keyprefix) + ensure_bytes(taskset_id) def _strip_prefix(self, key): for prefix in self.task_keyprefix, self.taskset_keyprefix: diff --git a/celery/backends/cache.py b/celery/backends/cache.py --- a/celery/backends/cache.py +++ b/celery/backends/cache.py @@ -4,7 +4,6 @@ from ..datastructures import LRUCache from ..exceptions import ImproperlyConfigured from ..utils import 
cached_property -from ..utils.encoding import ensure_bytes from .base import KeyValueStoreBackend @@ -24,7 +23,7 @@ def import_best_memcache(): raise ImproperlyConfigured( "Memcached backend requires either the 'pylibmc' " "or 'memcache' library") - _imp[0] = is_pylibmc, memcache + _imp[0] = (is_pylibmc, memcache) return _imp[0] @@ -55,6 +54,9 @@ def set(self, key, value, *args, **kwargs): def delete(self, key, *args, **kwargs): self.cache.pop(key, None) + def incr(self, key, delta=1): + return self.cache.incr(key, delta) + backends = {"memcache": lambda: get_best_memcache, "memcached": lambda: get_best_memcache, @@ -64,6 +66,7 @@ def delete(self, key, *args, **kwargs): class CacheBackend(KeyValueStoreBackend): servers = None + supports_native_join = True def __init__(self, expires=None, backend=None, options={}, **kwargs): super(CacheBackend, self).__init__(self, **kwargs) @@ -84,12 +87,6 @@ def __init__(self, expires=None, backend=None, options={}, **kwargs): "following backends: %s" % (self.backend, ", ".join(backends.keys()))) - def get_key_for_task(self, task_id): - return ensure_bytes(self.task_keyprefix) + ensure_bytes(task_id) - - def get_key_for_taskset(self, taskset_id): - return ensure_bytes(self.taskset_keyprefix) + ensure_bytes(taskset_id) - def get(self, key): return self.client.get(key) @@ -102,6 +99,23 @@ def set(self, key, value): def delete(self, key): return self.client.delete(key) + def on_chord_apply(self, setid, body, result=None, **kwargs): + key = self.get_key_for_chord(setid) + self.client.set(key, '0', time=86400) + + def on_chord_part_return(self, task, propagate=False): + from ..task.sets import subtask + from ..result import TaskSetResult + setid = task.request.taskset + if not setid: + return + key = self.get_key_for_chord(setid) + deps = TaskSetResult.restore(setid, backend=task.backend) + if self.client.incr(key) >= deps.total: + subtask(task.request.chord).delay(deps.join(propagate=propagate)) + deps.delete() + 
self.client.delete(key) + @cached_property def client(self): return self.Client(self.servers, **self.options) diff --git a/celery/backends/database.py b/celery/backends/database.py --- a/celery/backends/database.py +++ b/celery/backends/database.py @@ -129,9 +129,9 @@ def cleanup(self): expires = self.expires try: session.query(Task).filter( - Task.date_done < (datetime.now() - expires)).delete() + Task.date_done < (datetime.utcnow() - expires)).delete() session.query(TaskSet).filter( - TaskSet.date_done < (datetime.now() - expires)).delete() + TaskSet.date_done < (datetime.utcnow() - expires)).delete() session.commit() finally: session.close() diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py --- a/celery/backends/mongodb.py +++ b/celery/backends/mongodb.py @@ -69,8 +69,20 @@ def _get_connection(self): """Connect to the MongoDB server.""" if self._connection is None: from pymongo.connection import Connection - self._connection = Connection(self.mongodb_host, - self.mongodb_port) + + # The first pymongo.Connection() argument (host) can be + # a list of ['host:port'] elements or a mongodb connection + # URI. If this is the case, don't use self.mongodb_port + # but let pymongo get the port(s) from the URI instead. + # This enables the use of replica sets and sharding. + # See pymongo.Connection() for more info. 
+ args = [self.mongodb_host] + if isinstance(self.mongodb_host, basestring) \ + and not self.mongodb_host.startswith("mongodb://"): + args.append(self.mongodb_port) + + self._connection = Connection(*args) + return self._connection def _get_database(self): @@ -102,7 +114,7 @@ def _store_result(self, task_id, result, status, traceback=None): meta = {"_id": task_id, "status": status, "result": Binary(self.encode(result)), - "date_done": datetime.now(), + "date_done": datetime.utcnow(), "traceback": Binary(self.encode(traceback))} db = self._get_database() @@ -130,13 +142,49 @@ def _get_task_meta_for(self, task_id): return meta + def _save_taskset(self, taskset_id, result): + """Save the taskset result.""" + from pymongo.binary import Binary + + meta = {"_id": taskset_id, + "result": Binary(self.encode(result)), + "date_done": datetime.utcnow()} + + db = self._get_database() + taskmeta_collection = db[self.mongodb_taskmeta_collection] + taskmeta_collection.save(meta, safe=True) + + return result + + def _restore_taskset(self, taskset_id): + """Get the result for a taskset by id.""" + db = self._get_database() + taskmeta_collection = db[self.mongodb_taskmeta_collection] + obj = taskmeta_collection.find_one({"_id": taskset_id}) + if not obj: + return None + + meta = { + "task_id": obj["_id"], + "result": self.decode(obj["result"]), + "date_done": obj["date_done"], + } + + return meta + + def _delete_taskset(self, taskset_id): + """Delete a taskset by id.""" + db = self._get_database() + taskmeta_collection = db[self.mongodb_taskmeta_collection] + taskmeta_collection.remove({"_id": taskset_id}) + def cleanup(self): """Delete expired metadata.""" db = self._get_database() taskmeta_collection = db[self.mongodb_taskmeta_collection] taskmeta_collection.remove({ "date_done": { - "$lt": datetime.now() - self.expires, + "$lt": datetime.utcnow() - self.expires, } }) diff --git a/celery/backends/redis.py b/celery/backends/redis.py --- a/celery/backends/redis.py +++ 
b/celery/backends/redis.py @@ -32,8 +32,13 @@ class RedisBackend(KeyValueStoreBackend): #: default Redis password (:const:`None`) password = None + #: Maximium number of connections in the pool. + max_connections = None + + supports_native_join = True + def __init__(self, host=None, port=None, db=None, password=None, - expires=None, **kwargs): + expires=None, max_connections=None, **kwargs): super(RedisBackend, self).__init__(**kwargs) conf = self.app.conf if self.redis is None: @@ -54,6 +59,9 @@ def _get(key): self.db = db or _get("DB") or self.db self.password = password or _get("PASSWORD") or self.password self.expires = self.prepare_expires(expires, type=int) + self.max_connections = (max_connections + or _get("MAX_CONNECTIONS") + or self.max_connections) def get(self, key): return self.client.get(key) @@ -66,29 +74,35 @@ def set(self, key, value): client.set(key, value) if self.expires is not None: client.expire(key, self.expires) + client.publish(key, value) def delete(self, key): self.client.delete(key) - def on_chord_apply(self, setid, *args, **kwargs): - pass + def on_chord_apply(self, setid, body, result=None, **kwargs): + self.app.TaskSetResult(setid, result).save() - def on_chord_part_return(self, task, propagate=False, - keyprefix="chord-unlock-%s"): + def on_chord_part_return(self, task, propagate=False): from ..task.sets import subtask from ..result import TaskSetResult setid = task.request.taskset - key = keyprefix % setid + if not setid: + return + key = self.get_key_for_chord(setid) deps = TaskSetResult.restore(setid, backend=task.backend) if self.client.incr(key) >= deps.total: subtask(task.request.chord).delay(deps.join(propagate=propagate)) deps.delete() - self.client.expire(key, 86400) + self.client.delete(key) + else: + self.client.expire(key, 86400) @cached_property def client(self): - return self.redis.Redis(host=self.host, port=self.port, - db=self.db, password=self.password) + pool = self.redis.ConnectionPool(host=self.host, 
port=self.port, + db=self.db, password=self.password, + max_connections=self.max_connections) + return self.redis.Redis(connection_pool=pool) def __reduce__(self, args=(), kwargs={}): kwargs.update( @@ -96,5 +110,6 @@ def __reduce__(self, args=(), kwargs={}): port=self.port, db=self.db, password=self.password, - expires=self.expires)) + expires=self.expires, + max_connections=self.max_connections)) return super(RedisBackend, self).__reduce__(args, kwargs) diff --git a/celery/beat.py b/celery/beat.py --- a/celery/beat.py +++ b/celery/beat.py @@ -88,14 +88,14 @@ def __init__(self, name=None, task=None, last_run_at=None, self.total_run_count = total_run_count or 0 def _default_now(self): - return datetime.now() + return datetime.utcnow() def _next_instance(self, last_run_at=None): """Returns a new instance of the same class, but with its date and count fields updated.""" return self.__class__(**dict(self, - last_run_at=last_run_at or datetime.now(), - total_run_count=self.total_run_count + 1)) + last_run_at=last_run_at or datetime.utcnow(), + total_run_count=self.total_run_count + 1)) __next__ = next = _next_instance # for 2to3 def update(self, other): diff --git a/celery/bin/base.py b/celery/bin/base.py --- a/celery/bin/base.py +++ b/celery/bin/base.py @@ -106,6 +106,12 @@ def handle_argv(self, prog_name, argv): """ options, args = self.parse_options(prog_name, argv) + for o in vars(options): + v = getattr(options, o) + if isinstance(v, basestring): + setattr(options, o, os.path.expanduser(v)) + argv = map(lambda a: isinstance(a, basestring) + and os.path.expanduser(a) or a, argv) if not self.supports_args and args: sys.stderr.write( "\nUnrecognized command line arguments: %s\n" % ( diff --git a/celery/bin/celerybeat.py b/celery/bin/celerybeat.py --- a/celery/bin/celerybeat.py +++ b/celery/bin/celerybeat.py @@ -77,7 +77,7 @@ def get_options(self): default=None, action="store", dest="scheduler_cls", help="Scheduler class. 
Default is " - "celery.beat.PersistentScheduler"), + "celery.beat:PersistentScheduler"), Option('-l', '--loglevel', default=conf.CELERYBEAT_LOG_LEVEL, action="store", dest="loglevel", diff --git a/celery/bin/celeryd.py b/celery/bin/celeryd.py --- a/celery/bin/celeryd.py +++ b/celery/bin/celeryd.py @@ -140,7 +140,7 @@ def get_options(self): default=None, action="store", dest="scheduler_cls", help="Scheduler class. Default is " - "celery.beat.PersistentScheduler"), + "celery.beat:PersistentScheduler"), Option('-S', '--statedb', default=conf.CELERYD_STATE_DB, action="store", dest="db", help="Path to the state database. The extension '.db' will " diff --git a/celery/concurrency/__init__.py b/celery/concurrency/__init__.py --- a/celery/concurrency/__init__.py +++ b/celery/concurrency/__init__.py @@ -4,11 +4,11 @@ from ..utils import get_cls_by_name ALIASES = { - "processes": "celery.concurrency.processes.TaskPool", - "eventlet": "celery.concurrency.eventlet.TaskPool", - "gevent": "celery.concurrency.gevent.TaskPool", - "threads": "celery.concurrency.threads.TaskPool", - "solo": "celery.concurrency.solo.TaskPool", + "processes": "celery.concurrency.processes:TaskPool", + "eventlet": "celery.concurrency.eventlet:TaskPool", + "gevent": "celery.concurrency.gevent:TaskPool", + "threads": "celery.concurrency.threads:TaskPool", + "solo": "celery.concurrency.solo:TaskPool", } diff --git a/celery/concurrency/base.py b/celery/concurrency/base.py --- a/celery/concurrency/base.py +++ b/celery/concurrency/base.py @@ -16,7 +16,7 @@ def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None): + accept_callback=None, pid=None, **_): if accept_callback: accept_callback(pid or os.getpid(), time.time()) callback(target(*args, **kwargs)) @@ -41,7 +41,7 @@ def __init__(self, limit=None, putlocks=True, logger=None, **options): self.putlocks = putlocks self.logger = logger or log.get_default_logger() self.options = options - self.does_debug = 
self.logger.isEnabledFor(logging.DEBUG) + self._does_debug = self.logger.isEnabledFor(logging.DEBUG) def on_start(self): pass @@ -59,6 +59,10 @@ def terminate_job(self, pid): raise NotImplementedError( "%s does not implement kill_job" % (self.__class__, )) + def restart(self): + raise NotImplementedError( + "%s does not implement restart" % (self.__class__, )) + def stop(self): self._state = self.CLOSE self.on_stop() @@ -77,7 +81,7 @@ def apply_async(self, target, args=None, kwargs=None, callback=None, soft_timeout=None, timeout=None, **compat): """Equivalent of the :func:`apply` built-in function. - Callbacks should optimally return as soon as possible ince + Callbacks should optimally return as soon as possible since otherwise the thread which handles the result will get blocked. """ @@ -87,7 +91,7 @@ def apply_async(self, target, args=None, kwargs=None, callback=None, on_ready = partial(self.on_ready, callback, errback) on_worker_error = partial(self.on_worker_error, errback) - if self.does_debug: + if self._does_debug: self.logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)", target, safe_repr(args), safe_repr(kwargs)) diff --git a/celery/concurrency/processes/__init__.py b/celery/concurrency/processes/__init__.py --- a/celery/concurrency/processes/__init__.py +++ b/celery/concurrency/processes/__init__.py @@ -65,6 +65,9 @@ def grow(self, n=1): def shrink(self, n=1): return self._pool.shrink(n) + def restart(self): + self._pool.restart() + def _get_info(self): return {"max-concurrency": self.limit, "processes": [p.pid for p in self._pool._pool], diff --git a/celery/concurrency/processes/pool.py b/celery/concurrency/processes/pool.py --- a/celery/concurrency/processes/pool.py +++ b/celery/concurrency/processes/pool.py @@ -24,7 +24,7 @@ import warnings import logging -from multiprocessing import Process, cpu_count, TimeoutError +from multiprocessing import Process, cpu_count, TimeoutError, Event from multiprocessing import util from multiprocessing.util import 
Finalize, debug @@ -134,7 +134,8 @@ def soft_timeout_sighandler(signum, frame): # -def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None): +def worker(inqueue, outqueue, initializer=None, initargs=(), + maxtasks=None, sentinel=None): # Re-init logging system. # Workaround for http://bugs.python.org/issue6721#msg140215 # Python logging module uses RLock() objects which are broken after @@ -177,6 +178,10 @@ def poll(timeout): # noqa completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): + if sentinel is not None and sentinel.is_set(): + debug('worker got sentinel -- exiting') + break + try: ready, task = poll(1.0) if not ready: @@ -543,6 +548,7 @@ def __init__(self, processes=None, initializer=None, initargs=(), raise TypeError('initializer must be a callable') self._pool = [] + self._poolctrl = {} for i in range(processes): self._create_worker_process() @@ -580,19 +586,22 @@ def __init__(self, processes=None, initializer=None, initargs=(), ) def _create_worker_process(self): + sentinel = Event() w = self.Process( target=worker, args=(self._inqueue, self._outqueue, self._initializer, self._initargs, - self._maxtasksperchild), + self._maxtasksperchild, + sentinel), ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() + self._poolctrl[w.pid] = sentinel return w - def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0): + def _join_exited_workers(self, shutdown=False): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. @@ -600,11 +609,12 @@ def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0): now = None # The worker may have published a result before being terminated, # but we have no way to accurately tell if it did. So we wait for - # 10 seconds before we mark the job with WorkerLostError. 
+ # _lost_worker_timeout seconds before we mark the job with + # WorkerLostError. for job in [job for job in self._cache.values() if not job.ready() and job._worker_lost]: now = now or time.time() - if now - job._worker_lost > lost_worker_timeout: + if now - job._worker_lost > job._lost_worker_timeout: exc_info = None try: raise WorkerLostError("Worker exited prematurely.") @@ -625,6 +635,7 @@ def _join_exited_workers(self, shutdown=False, lost_worker_timeout=10.0): debug('Supervisor: worked %d joined' % i) cleaned.append(worker.pid) del self._pool[i] + del self._poolctrl[worker.pid] if cleaned: for job in self._cache.values(): for worker_pid in job.worker_pids(): @@ -730,39 +741,44 @@ def map(self, func, iterable, chunksize=None): assert self._state == RUN return self.map_async(func, iterable, chunksize).get() - def imap(self, func, iterable, chunksize=1): + def imap(self, func, iterable, chunksize=1, lost_worker_timeout=10.0): ''' Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()` ''' assert self._state == RUN if chunksize == 1: - result = IMapIterator(self._cache) + result = IMapIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) - result = IMapIterator(self._cache) + result = IMapIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) - def imap_unordered(self, func, iterable, chunksize=1): + def imap_unordered(self, func, iterable, chunksize=1, + lost_worker_timeout=10.0): ''' Like `imap()` method but ordering of results is arbitrary ''' assert self._state == RUN if chunksize == 1: - result = IMapUnorderedIterator(self._cache) + result = 
IMapUnorderedIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) - result = IMapUnorderedIterator(self._cache) + result = IMapUnorderedIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) @@ -868,6 +884,10 @@ def join(self): debug('joining worker %s/%s (%r)' % (i, len(self._pool), p, )) p.join() + def restart(self): + for e in self._poolctrl.itervalues(): + e.set() + @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue @@ -936,7 +956,7 @@ class ApplyResult(object): def __init__(self, cache, callback, accept_callback=None, timeout_callback=None, error_callback=None, soft_timeout=None, - timeout=None): + timeout=None, lost_worker_timeout=10.0): self._mutex = threading.Lock() self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() @@ -948,6 +968,7 @@ def __init__(self, cache, callback, accept_callback=None, self._timeout_callback = timeout_callback self._timeout = timeout self._soft_timeout = soft_timeout + self._lost_worker_timeout = lost_worker_timeout self._accepted = False self._worker_pid = None @@ -1091,15 +1112,19 @@ def worker_pids(self): class IMapIterator(object): + _worker_lost = None - def __init__(self, cache): + def __init__(self, cache, lost_worker_timeout=10.0): self._cond = threading.Condition(threading.Lock()) self._job = job_counter.next() self._cache = cache self._items = collections.deque() self._index = 0 self._length = None + self._ready = False self._unsorted = {} + self._worker_pids = [] + self._lost_worker_timeout = lost_worker_timeout 
cache[self._job] = self def __iter__(self): @@ -1112,12 +1137,14 @@ def next(self, timeout=None): item = self._items.popleft() except IndexError: if self._index == self._length: + self._ready = True raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: + self._ready = True raise StopIteration raise TimeoutError finally: @@ -1126,7 +1153,7 @@ def next(self, timeout=None): success, value = item if success: return value - raise value + raise Exception(value) __next__ = next # XXX @@ -1145,6 +1172,7 @@ def _set(self, i, obj): self._unsorted[i] = obj if self._index == self._length: + self._ready = True del self._cache[self._job] finally: self._cond.release() @@ -1154,11 +1182,21 @@ def _set_length(self, length): try: self._length = length if self._index == self._length: + self._ready = True self._cond.notify() del self._cache[self._job] finally: self._cond.release() + def _ack(self, i, time_accepted, pid): + self._worker_pids.append(pid) + + def ready(self): + return self._ready + + def worker_pids(self): + return self._worker_pids + # # Class whose instances are returned by `Pool.imap_unordered()` # @@ -1173,6 +1211,7 @@ def _set(self, i, obj): self._index += 1 self._cond.notify() if self._index == self._length: + self._ready = True del self._cache[self._job] finally: self._cond.release() diff --git a/celery/concurrency/solo.py b/celery/concurrency/solo.py --- a/celery/concurrency/solo.py +++ b/celery/concurrency/solo.py @@ -9,17 +9,13 @@ class TaskPool(BasePool): """Solo task pool (blocking, inline).""" - def on_start(self): - self.pid = os.getpid() - - def on_apply(self, target, args, kwargs, callback=None, - accept_callback=None, **_): - return apply_target(target, args, kwargs, - callback, accept_callback, self.pid) + def __init__(self, *args, **kwargs): + super(TaskPool, self).__init__(*args, **kwargs) + self.on_apply = apply_target def _get_info(self): return {"max-concurrency": 1, - 
"processes": [self.pid], + "processes": [os.getpid()], "max-tasks-per-child": None, "put-guarded-by-semaphore": True, "timeouts": ()} diff --git a/celery/datastructures.py b/celery/datastructures.py --- a/celery/datastructures.py +++ b/celery/datastructures.py @@ -19,6 +19,8 @@ from itertools import chain from threading import RLock +from kombu.utils.limits import TokenBucket # noqa + from .utils.compat import UserDict, OrderedDict @@ -374,60 +376,10 @@ def _iterate_values(self): pass itervalues = _iterate_values - -class TokenBucket(object): - """Token Bucket Algorithm. - - See http://en.wikipedia.org/wiki/Token_Bucket - Most of this code was stolen from an entry in the ASPN Python Cookbook: - http://code.activestate.com/recipes/511490/ - - .. admonition:: Thread safety - - This implementation may not be thread safe. - - """ - - #: The rate in tokens/second that the bucket will be refilled - fill_rate = None - - #: Maximum number of tokensin the bucket. - capacity = 1 - - #: Timestamp of the last time a token was taken out of the bucket. - timestamp = None - - def __init__(self, fill_rate, capacity=1): - self.capacity = float(capacity) - self._tokens = capacity - self.fill_rate = float(fill_rate) - self.timestamp = time.time() - - def can_consume(self, tokens=1): - """Returns :const:`True` if `tokens` number of tokens can be consumed - from the bucket.""" - if tokens <= self._get_tokens(): - self._tokens -= tokens - return True - return False - - def expected_time(self, tokens=1): - """Returns the expected time in seconds when a new token should be - available. - - .. admonition:: Warning - - This consumes a token from the bucket. 
- - """ - _tokens = self._get_tokens() - tokens = max(tokens, _tokens) - return (tokens - _tokens) / self.fill_rate - - def _get_tokens(self): - if self._tokens < self.capacity: - now = time.time() - delta = self.fill_rate * (now - self.timestamp) - self._tokens = min(self.capacity, self._tokens + delta) - self.timestamp = now - return self._tokens + def incr(self, key, delta=1): + with self.mutex: + # this acts as memcached does- store as a string, but return a + # integer as long as it exists and we can cast it + newval = int(self.data.pop(key)) + delta + self[key] = str(newval) + return newval diff --git a/celery/db/models.py b/celery/db/models.py --- a/celery/db/models.py +++ b/celery/db/models.py @@ -27,8 +27,8 @@ class Task(ResultModelBase): task_id = sa.Column(sa.String(255), unique=True) status = sa.Column(sa.String(50), default=states.PENDING) result = sa.Column(PickleType, nullable=True) - date_done = sa.Column(sa.DateTime, default=datetime.now, - onupdate=datetime.now, nullable=True) + date_done = sa.Column(sa.DateTime, default=datetime.utcnow, + onupdate=datetime.utcnow, nullable=True) traceback = sa.Column(sa.Text, nullable=True) def __init__(self, task_id): @@ -54,7 +54,7 @@ class TaskSet(ResultModelBase): autoincrement=True, primary_key=True) taskset_id = sa.Column(sa.String(255), unique=True) result = sa.Column(sa.PickleType, nullable=True) - date_done = sa.Column(sa.DateTime, default=datetime.now, + date_done = sa.Column(sa.DateTime, default=datetime.utcnow, nullable=True) def __init__(self, taskset_id, result): diff --git a/celery/events/__init__.py b/celery/events/__init__.py --- a/celery/events/__init__.py +++ b/celery/events/__init__.py @@ -77,6 +77,8 @@ def __init__(self, connection=None, hostname=None, enabled=True, self.publisher = None self._outbound_buffer = deque() self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER + self.on_enabled = set() + self.on_disabled = set() self.enabled = enabled if self.enabled: @@ -93,11 
+95,15 @@ def enable(self): exchange=event_exchange, serializer=self.serializer) self.enabled = True + for callback in self.on_enabled: + callback() def disable(self): if self.enabled: self.enabled = False self.close() + for callback in self.on_disabled: + callback() def send(self, type, **fields): """Send event. diff --git a/celery/events/cursesmon.py b/celery/events/cursesmon.py --- a/celery/events/cursesmon.py +++ b/celery/events/cursesmon.py @@ -311,7 +311,7 @@ def display_task_row(self, lineno, task): attr = curses.A_NORMAL if task.uuid == self.selected_task: attr = curses.A_STANDOUT - timestamp = datetime.fromtimestamp( + timestamp = datetime.utcfromtimestamp( task.timestamp or time.time()) timef = timestamp.strftime("%H:%M:%S") line = self.format_row(task.uuid, task.name, diff --git a/celery/events/dumper.py b/celery/events/dumper.py --- a/celery/events/dumper.py +++ b/celery/events/dumper.py @@ -37,7 +37,7 @@ def humanize_type(type): class Dumper(object): def on_event(self, event): - timestamp = datetime.fromtimestamp(event.pop("timestamp")) + timestamp = datetime.utcfromtimestamp(event.pop("timestamp")) type = event.pop("type").lower() hostname = event.pop("hostname") if type.startswith("task-"): diff --git a/celery/events/snapshot.py b/celery/events/snapshot.py --- a/celery/events/snapshot.py +++ b/celery/events/snapshot.py @@ -17,9 +17,10 @@ import atexit +from kombu.utils.limits import TokenBucket + from .. import platforms from ..app import app_or_default -from ..datastructures import TokenBucket from ..utils import timer2, instantiate, LOG_LEVELS from ..utils.dispatch import Signal from ..utils.timeutils import rate diff --git a/celery/exceptions.py b/celery/exceptions.py --- a/celery/exceptions.py +++ b/celery/exceptions.py @@ -16,6 +16,14 @@ """ +class SecurityError(Exception): + """Security related exceptions. + + Handle with care. 
+ + """ + + class SystemTerminate(SystemExit): """Signals that the worker should terminate.""" @@ -76,6 +84,10 @@ class NotConfigured(UserWarning): """Celery has not been configured, as no config module has been found.""" +class InvalidTaskError(Exception): + """The task has invalid data or is not properly constructed.""" + + class CPendingDeprecationWarning(PendingDeprecationWarning): pass diff --git a/celery/execute/trace.py b/celery/execute/trace.py --- a/celery/execute/trace.py +++ b/celery/execute/trace.py @@ -12,131 +12,223 @@ """ from __future__ import absolute_import +# ## --- +# BE WARNED: You are probably going to suffer a heartattack just +# by looking at this code! +# +# This is the heart of the worker, the inner loop so to speak. +# It used to be split up into nice little classes and methods, +# but in the end it only resulted in bad performance, and horrible tracebacks. + +import os +import socket import sys import traceback +import warnings +from .. import current_app from .. 
import states, signals from ..datastructures import ExceptionInfo from ..exceptions import RetryTaskError from ..registry import tasks +from ..utils.serialization import get_pickleable_exception + +send_prerun = signals.task_prerun.send +prerun_receivers = signals.task_prerun.receivers +send_postrun = signals.task_postrun.send +postrun_receivers = signals.task_postrun.receivers +STARTED = states.STARTED +SUCCESS = states.SUCCESS +RETRY = states.RETRY +FAILURE = states.FAILURE +EXCEPTION_STATES = states.EXCEPTION_STATES +_pid = None + + +def getpid(): + global _pid + if _pid is None: + _pid = os.getpid() + return _pid class TraceInfo(object): + __slots__ = ("state", "retval", "exc_info", + "exc_type", "exc_value", "tb", "strtb") - def __init__(self, status=states.PENDING, retval=None, exc_info=None): - self.status = status + def __init__(self, state, retval=None, exc_info=None): + self.state = state self.retval = retval self.exc_info = exc_info - self.exc_type = None - self.exc_value = None - self.tb = None - self.strtb = None - if self.exc_info: + if exc_info: self.exc_type, self.exc_value, self.tb = exc_info - self.strtb = "\n".join(traceback.format_exception(*exc_info)) + else: + self.exc_type = self.exc_value = self.tb = None - @classmethod - def trace(cls, fun, args, kwargs, propagate=False): - """Trace the execution of a function, calling the appropiate callback - if the function raises retry, an failure or returned successfully. + def handle_error_state(self, task, eager=False): + store_errors = not eager + if task.ignore_result: + store_errors = task.store_errors_even_if_ignored - :keyword propagate: If true, errors will propagate to the caller. 
+ return { + RETRY: self.handle_retry, + FAILURE: self.handle_failure, + }[self.state](task, store_errors=store_errors) - """ - try: - return cls(states.SUCCESS, retval=fun(*args, **kwargs)) - except RetryTaskError, exc: - return cls(states.RETRY, retval=exc, exc_info=sys.exc_info()) - except Exception, exc: - if propagate: - raise - return cls(states.FAILURE, retval=exc, exc_info=sys.exc_info()) - except BaseException, exc: - raise - except: # pragma: no cover - # For Python2.5 where raising strings are still allowed - # (but deprecated) - if propagate: - raise - return cls(states.FAILURE, retval=None, exc_info=sys.exc_info()) - - -class TaskTrace(object): - - def __init__(self, task_name, task_id, args, kwargs, task=None, - request=None, propagate=None, **_): - self.task_id = task_id - self.task_name = task_name - self.args = args - self.kwargs = kwargs - self.task = task or tasks[self.task_name] - self.request = request or {} - self.status = states.PENDING - self.strtb = None - self.propagate = propagate - self._trace_handlers = {states.FAILURE: self.handle_failure, - states.RETRY: self.handle_retry, - states.SUCCESS: self.handle_success} - - def __call__(self): - return self.execute() - - def execute(self): - self.task.request.update(self.request, args=self.args, - called_directly=False, kwargs=self.kwargs) - signals.task_prerun.send(sender=self.task, task_id=self.task_id, - task=self.task, args=self.args, - kwargs=self.kwargs) - retval = self._trace() - - signals.task_postrun.send(sender=self.task, task_id=self.task_id, - task=self.task, args=self.args, - kwargs=self.kwargs, retval=retval) - self.task.request.clear() - return retval - - def _trace(self): - trace = TraceInfo.trace(self.task, self.args, self.kwargs, - propagate=self.propagate) - self.status = trace.status - self.strtb = trace.strtb - handler = self._trace_handlers[trace.status] - r = handler(trace.retval, trace.exc_type, trace.tb, trace.strtb) - self.handle_after_return(trace.status, 
trace.retval, - trace.exc_type, trace.tb, trace.strtb, - einfo=trace.exc_info) - return r - - def handle_after_return(self, status, retval, type_, tb, strtb, - einfo=None): - if status in states.EXCEPTION_STATES: - einfo = ExceptionInfo(einfo) - self.task.after_return(status, retval, self.task_id, - self.args, self.kwargs, einfo) - - def handle_success(self, retval, *args): - """Handle successful execution.""" - self.task.on_success(retval, self.task_id, self.args, self.kwargs) - return retval - - def handle_retry(self, exc, type_, tb, strtb): + def handle_retry(self, task, store_errors=True): """Handle retry exception.""" # Create a simpler version of the RetryTaskError that stringifies # the original exception instead of including the exception instance. # This is for reporting the retry in logs, email etc, while # guaranteeing pickleability. - message, orig_exc = exc.args + req = task.request + exc, type_, tb = self.retval, self.exc_type, self.tb + message, orig_exc = self.retval.args + if store_errors: + task.backend.mark_as_retry(req.id, orig_exc, self.strtb) expanded_msg = "%s: %s" % (message, str(orig_exc)) einfo = ExceptionInfo((type_, type_(expanded_msg, None), tb)) - self.task.on_retry(exc, self.task_id, self.args, self.kwargs, einfo) + task.on_retry(exc, req.id, req.args, req.kwargs, einfo) return einfo - def handle_failure(self, exc, type_, tb, strtb): + def handle_failure(self, task, store_errors=True): """Handle exception.""" + req = task.request + exc, type_, tb = self.retval, self.exc_type, self.tb + if store_errors: + task.backend.mark_as_failure(req.id, exc, self.strtb) + exc = get_pickleable_exception(exc) einfo = ExceptionInfo((type_, exc, tb)) - self.task.on_failure(exc, self.task_id, self.args, self.kwargs, einfo) - signals.task_failure.send(sender=self.task, task_id=self.task_id, - exception=exc, args=self.args, - kwargs=self.kwargs, traceback=tb, + task.on_failure(exc, req.id, req.args, req.kwargs, einfo) + 
signals.task_failure.send(sender=task, task_id=req.id, + exception=exc, args=req.args, + kwargs=req.kwargs, traceback=tb, einfo=einfo) return einfo + + @property + def strtb(self): + if self.exc_info: + return '\n'.join(traceback.format_exception(*self.exc_info)) + return '' + + +def build_tracer(name, task, loader=None, hostname=None, store_errors=True, + Info=TraceInfo, eager=False, propagate=False): + task = task or tasks[name] + loader = loader or current_app.loader + backend = task.backend + ignore_result = task.ignore_result + track_started = task.track_started + track_started = not eager and (task.track_started and not ignore_result) + publish_result = not eager and not ignore_result + hostname = hostname or socket.gethostname() + + loader_task_init = loader.on_task_init + loader_cleanup = loader.on_process_cleanup + + task_on_success = task.on_success + task_after_return = task.after_return + task_request = task.request + + store_result = backend.store_result + backend_cleanup = backend.process_cleanup + + pid = os.getpid() + + update_request = task_request.update + clear_request = task_request.clear + on_chord_part_return = backend.on_chord_part_return + + def trace_task(uuid, args, kwargs, request=None): + R = I = None + try: + update_request(request or {}, args=args, + called_directly=False, kwargs=kwargs) + try: + # -*- PRE -*- + send_prerun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs) + loader_task_init(uuid, task) + if track_started: + store_result(uuid, {"pid": pid, + "hostname": hostname}, STARTED) + + # -*- TRACE -*- + try: + R = retval = task(*args, **kwargs) + state, einfo = SUCCESS, None + task_on_success(retval, uuid, args, kwargs) + if publish_result: + store_result(uuid, retval, SUCCESS) + except RetryTaskError, exc: + I = Info(RETRY, exc, sys.exc_info()) + state, retval, einfo = I.state, I.retval, I.exc_info + R = I.handle_error_state(task, eager=eager) + except Exception, exc: + if propagate: + raise + I = Info(FAILURE, 
exc, sys.exc_info()) + state, retval, einfo = I.state, I.retval, I.exc_info + R = I.handle_error_state(task, eager=eager) + except BaseException, exc: + raise + except: + # pragma: no cover + # For Python2.5 where raising strings are still allowed + # (but deprecated) + if propagate: + raise + I = Info(FAILURE, None, sys.exc_info()) + state, retval, einfo = I.state, I.retval, I.exc_info + R = I.handle_error_state(task, eager=eager) + + # -* POST *- + if task_request.chord: + on_chord_part_return(task) + task_after_return(state, retval, uuid, args, kwargs, einfo) + send_postrun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs, retval=retval) + finally: + clear_request() + if not eager: + try: + backend_cleanup() + loader_cleanup() + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except Exception, exc: + logger = current_app.log.get_default_logger() + logger.error("Process cleanup failed: %r", exc, + exc_info=sys.exc_info()) + except Exception, exc: + if eager: + raise + R = report_internal_error(task, exc) + return R, I + + return trace_task + + +def trace_task(task, uuid, args, kwargs, request=None, **opts): + try: + if task.__tracer__ is None: + task.__tracer__ = build_tracer(task.name, task, **opts) + return task.__tracer__(uuid, args, kwargs, request) + except Exception, exc: + return report_internal_error(task, exc), None + + +def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): + opts.setdefault("eager", True) + return build_tracer(task.name, task, **opts)( + uuid, args, kwargs, request) + + +def report_internal_error(task, exc): + _type, _value, _tb = sys.exc_info() + _value = task.backend.prepare_exception(exc) + exc_info = ExceptionInfo((_type, _value, _tb)) + warnings.warn("Exception outside body: %s: %s\n%s" % tuple( + map(str, (exc.__class__, exc, exc_info.traceback)))) + return exc_info diff --git a/celery/loaders/__init__.py b/celery/loaders/__init__.py --- a/celery/loaders/__init__.py +++ 
b/celery/loaders/__init__.py @@ -15,9 +15,9 @@ from .. import current_app from ..utils import deprecated, get_cls_by_name -LOADER_ALIASES = {"app": "celery.loaders.app.AppLoader", - "default": "celery.loaders.default.Loader", - "django": "djcelery.loaders.DjangoLoader"} +LOADER_ALIASES = {"app": "celery.loaders.app:AppLoader", + "default": "celery.loaders.default:Loader", + "django": "djcelery.loaders:DjangoLoader"} def get_loader_cls(loader): diff --git a/celery/log.py b/celery/log.py --- a/celery/log.py +++ b/celery/log.py @@ -24,6 +24,12 @@ is_py3k = sys.version_info >= (3, 0) +def mlevel(level): + if level and not isinstance(level, int): + return LOG_LEVELS[level.upper()] + return level + + class ColorFormatter(logging.Formatter): #: Loglevel -> Color mapping. COLORS = colored().names @@ -71,7 +77,7 @@ class Logging(object): def __init__(self, app): self.app = app - self.loglevel = self.app.conf.CELERYD_LOG_LEVEL + self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL) self.format = self.app.conf.CELERYD_LOG_FORMAT self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT self.colorize = self.app.conf.CELERYD_LOG_COLOR @@ -92,14 +98,14 @@ def colored(self, logfile=None): def get_task_logger(self, loglevel=None, name=None): logger = logging.getLogger(name or "celery.task.default") if loglevel is not None: - logger.setLevel(loglevel) + logger.setLevel(mlevel(loglevel)) return logger def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, **kwargs): if Logging._setup: return - loglevel = loglevel or self.loglevel + loglevel = mlevel(loglevel or self.loglevel) format = format or self.format if colorize is None: colorize = self.supports_color(logfile) @@ -120,7 +126,7 @@ def setup_logging_subsystem(self, loglevel=None, logfile=None, mp = mputil.get_logger() if mputil else None for logger in filter(None, (root, mp)): self._setup_logger(logger, logfile, format, colorize, **kwargs) - logger.setLevel(loglevel) + 
logger.setLevel(mlevel(loglevel)) signals.after_setup_logger.send(sender=None, logger=logger, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize) @@ -144,7 +150,7 @@ def get_default_logger(self, loglevel=None, name="celery"): """ logger = logging.getLogger(name) if loglevel is not None: - logger.setLevel(loglevel) + logger.setLevel(mlevel(loglevel)) return logger def setup_logger(self, loglevel=None, logfile=None, @@ -157,7 +163,7 @@ def setup_logger(self, loglevel=None, logfile=None, Returns logger object. """ - loglevel = loglevel or self.loglevel + loglevel = mlevel(loglevel or self.loglevel) format = format or self.format if colorize is None: colorize = self.supports_color(logfile) @@ -179,7 +185,7 @@ def setup_task_logger(self, loglevel=None, logfile=None, format=None, Returns logger object. """ - loglevel = loglevel or self.loglevel + loglevel = mlevel(loglevel or self.loglevel) format = format or self.task_format if colorize is None: colorize = self.supports_color(logfile) @@ -247,9 +253,7 @@ class LoggingProxy(object): def __init__(self, logger, loglevel=None): self.logger = logger - self.loglevel = loglevel or self.logger.level or self.loglevel - if not isinstance(self.loglevel, int): - self.loglevel = LOG_LEVELS[self.loglevel.upper()] + self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) self._safewrap_handlers() def _safewrap_handlers(self): diff --git a/celery/platforms.py b/celery/platforms.py --- a/celery/platforms.py +++ b/celery/platforms.py @@ -21,11 +21,18 @@ from .local import try_import +from kombu.utils.limits import TokenBucket + _setproctitle = try_import("setproctitle") resource = try_import("resource") pwd = try_import("pwd") grp = try_import("grp") +try: + from multiprocessing.process import current_process +except ImportError: + current_process = None # noqa + SYSTEM = _platform.system() IS_OSX = SYSTEM == "Darwin" IS_WINDOWS = SYSTEM == "Windows" @@ -34,6 +41,8 @@ DAEMON_WORKDIR = "/" 
DAEMON_REDIRECT_TO = getattr(os, "devnull", "/dev/null") +_setps_bucket = TokenBucket(0.5) # 30/m, every 2 seconds + def pyimplementation(): if hasattr(_platform, "python_implementation"): @@ -283,7 +292,9 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, workdir = os.getcwd() if workdir is None else workdir signals.reset("SIGCLD") # Make sure SIGCLD is using the default handler. - set_effective_user(uid=uid, gid=gid) + if not os.geteuid(): + # no point trying to setuid unless we're root. + maybe_drop_privileges(uid=uid, gid=gid) # Since without stderr any errors will be silently suppressed, # we need to know that we have access to the logfile. @@ -330,6 +341,51 @@ def parse_gid(gid): raise +def _setgroups_hack(groups): + """:fun:`setgroups` may have a platform-dependent limit, + and it is not always possible to know in advance what this limit + is, so we use this ugly hack stolen from glibc.""" + groups = groups[:] + + while 1: + try: + return os.setgroups(groups) + except ValueError: # error from Python's check. + if len(groups) <= 1: + raise + groups[:] = groups[:-1] + except OSError, exc: # error from the OS. + if exc.errno != errno.EINVAL or len(groups) <= 1: + raise + groups[:] = groups[:-1] + + +def setgroups(groups): + max_groups = None + try: + max_groups = os.sysconf("SC_NGROUPS_MAX") + except: + pass + try: + return _setgroups_hack(groups[:max_groups]) + except OSError, exc: + if exc.errno != errno.EPERM: + raise + if any(group not in groups for group in os.getgroups()): + # we shouldn't be allowed to change to this group. 
+ raise + + +def initgroups(uid, gid): + if grp and pwd: + username = pwd.getpwuid(uid)[0] + if hasattr(os, "initgroups"): # Python 2.7+ + return os.initgroups(username, gid) + groups = [gr.gr_gid for gr in grp.getgrall() + if username in gr.gr_mem] + setgroups(groups) + + def setegid(gid): """Set effective group id.""" gid = parse_gid(gid) @@ -352,15 +408,15 @@ def setuid(uid): os.setuid(parse_uid(uid)) -def set_effective_user(uid=None, gid=None): +def maybe_drop_privileges(uid=None, gid=None): """Change process privileges to new user/group. - If UID and GID is set the effective user/group is set. + If UID and GID is specified, the real user/group is changed. - If only UID is set, the effective user is set, and the group is - set to the users primary group. + If only UID is specified, the real user is changed, and the group is + changed to the users primary group. - If only GID is set, the effective group is set. + If only GID is specified, only the group is changed. """ uid = uid and parse_uid(uid) @@ -370,7 +426,12 @@ def set_effective_user(uid=None, gid=None): # If GID isn't defined, get the primary GID of the user. if not gid and pwd: gid = pwd.getpwuid(uid).pw_gid - setgid(gid) + # Must set the GID before initgroups(), as setgid() + # is known to zap the group list on some platforms. + setgid(gid) + initgroups(uid, gid) + + # at last: setuid(uid) else: gid and setgid(gid) @@ -500,21 +561,27 @@ def set_process_title(progname, info=None): return proctitle -def set_mp_process_title(progname, info=None, hostname=None): - """Set the ps name using the multiprocessing process name. +if os.environ.get("NOSETPS"): - Only works if :mod:`setproctitle` is installed. 
+ def set_mp_process_title(*a, **k): + pass +else: - """ - if hostname: - progname = "%s@%s" % (progname, hostname.split(".")[0]) - try: - from multiprocessing.process import current_process - except ImportError: - return set_process_title(progname, info=info) - else: - return set_process_title("%s:%s" % (progname, - current_process().name), info=info) + def set_mp_process_title(progname, info=None, hostname=None, # noqa + rate_limit=False): + """Set the ps name using the multiprocessing process name. + + Only works if :mod:`setproctitle` is installed. + + """ + if not rate_limit or _setps_bucket.can_consume(1): + if hostname: + progname = "%s@%s" % (progname, hostname.split(".")[0]) + if current_process is not None: + return set_process_title( + "%s:%s" % (progname, current_process().name), info=info) + else: + return set_process_title(progname, info=info) def shellsplit(s, posix=True): diff --git a/celery/registry.py b/celery/registry.py --- a/celery/registry.py +++ b/celery/registry.py @@ -17,7 +17,6 @@ class TaskRegistry(dict): - NotRegistered = NotRegistered def regular(self): @@ -59,12 +58,6 @@ def filter_types(self, type): return dict((name, task) for name, task in self.iteritems() if task.type == type) - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - raise self.NotRegistered(key) - def pop(self, key, *args): try: return dict.pop(self, key, *args) diff --git a/celery/result.py b/celery/result.py --- a/celery/result.py +++ b/celery/result.py @@ -29,11 +29,11 @@ def _unpickle_result(task_id, task_name): return _unpickle_task(task_name).AsyncResult(task_id) -class BaseAsyncResult(object): - """Base class for pending result, supports custom task result backend. +class AsyncResult(object): + """Query task state. :param task_id: see :attr:`task_id`. - :param backend: see :attr:`backend`. + :keyword backend: see :attr:`backend`. """ @@ -46,10 +46,10 @@ class BaseAsyncResult(object): #: The task result backend to use. 
backend = None - def __init__(self, task_id, backend, task_name=None, app=None): + def __init__(self, task_id, backend=None, task_name=None, app=None): self.app = app_or_default(app) self.task_id = task_id - self.backend = backend + self.backend = backend or self.app.backend self.task_name = task_name def forget(self): @@ -194,23 +194,7 @@ def state(self): def status(self): """Deprecated alias of :attr:`state`.""" return self.state - - -class AsyncResult(BaseAsyncResult): - """Pending task result using the default backend. - - :param task_id: The task uuid. - - """ - - #: Task result store backend to use. - backend = None - - def __init__(self, task_id, backend=None, task_name=None, app=None): - app = app_or_default(app) - backend = backend or app.backend - super(AsyncResult, self).__init__(task_id, backend, - task_name=task_name, app=app) +BaseAsyncResult = AsyncResult # for backwards compatibility. class ResultSet(object): @@ -451,6 +435,10 @@ def subtasks(self): """Deprecated alias to :attr:`results`.""" return self.results + @property + def supports_native_join(self): + return self.results[0].backend.supports_native_join + class TaskSetResult(ResultSet): """An instance of this class is returned by diff --git a/celery/schedules.py b/celery/schedules.py --- a/celery/schedules.py +++ b/celery/schedules.py @@ -258,7 +258,7 @@ def _expand_cronspec(cronspec, max_): return result def __init__(self, minute='*', hour='*', day_of_week='*', - nowfun=datetime.now): + nowfun=datetime.utcnow): self._orig_minute = minute self._orig_hour = hour self._orig_day_of_week = day_of_week diff --git a/celery/security/__init__.py b/celery/security/__init__.py new file mode 100644 --- /dev/null +++ b/celery/security/__init__.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import +from __future__ import with_statement + +from kombu.serialization import registry, SerializerNotInstalled + +from .. 
import current_app +from ..exceptions import ImproperlyConfigured + +from .serialization import register_auth + +SSL_NOT_INSTALLED = """\ +You need to install the pyOpenSSL library to use the auth serializer. +Please install by: + + $ pip install pyOpenSSL +""" + +SETTING_MISSING = """\ +Sorry, but you have to configure the + * CELERY_SECURITY_KEY + * CELERY_SECURITY_CERTIFICATE, and the + * CELERY_SECURITY_CERT_STORE +configuration settings to use the auth serializer. + +Please see the configuration reference for more information. +""" + + +def disable_untrusted_serializers(whitelist=None): + for name in set(registry._decoders.keys()) - set(whitelist or []): + try: + registry.disable(name) + except SerializerNotInstalled: + pass + + +def setup_security(allowed_serializers=None, key=None, cert=None, store=None, + digest="sha1", serializer="json"): + """Setup the message-signing serializer. + + Disables untrusted serializers and if configured to use the ``auth`` + serializer will register the auth serializer with the provided settings + into the Kombu serializer registry. + + :keyword allowed_serializers: List of serializer names, or content_types + that should be exempt from being disabled. + :keyword key: Name of private key file to use. + Defaults to the :setting:`CELERY_SECURITY_KEY` setting. + :keyword cert: Name of certificate file to use. + Defaults to the :setting:`CELERY_SECURITY_CERTIFICATE` setting. + :keyword store: Directory containing certificates. + Defaults to the :setting:`CELERY_SECURITY_CERT_STORE` setting. + :keyword digest: Digest algorithm used when signing messages. + Default is ``sha1``. + :keyword serializer: Serializer used to encode messages after + they have been signed. See :setting:`CELERY_TASK_SERIALIZER` for + the serializers supported. + Default is ``json``. 
+ + """ + + disable_untrusted_serializers(allowed_serializers) + + conf = current_app.conf + if conf.CELERY_TASK_SERIALIZER != "auth": + return + + try: + from OpenSSL import crypto # noqa + except ImportError: + raise ImproperlyConfigured(SSL_NOT_INSTALLED) + + key = key or conf.CELERY_SECURITY_KEY + cert = cert or conf.CELERY_SECURITY_CERTIFICATE + store = store or conf.CELERY_SECURITY_CERT_STORE + + if any(not v for v in (key, cert, store)): + raise ImproperlyConfigured(SETTING_MISSING) + + with open(key) as kf: + with open(cert) as cf: + register_auth(kf.read(), cf.read(), store) diff --git a/celery/security/certificate.py b/celery/security/certificate.py new file mode 100644 --- /dev/null +++ b/celery/security/certificate.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import +from __future__ import with_statement + +import os +import glob + +try: + from OpenSSL import crypto +except ImportError: + crypto = None # noqa + +from ..exceptions import SecurityError + + +class Certificate(object): + """X.509 certificate.""" + + def __init__(self, cert): + assert crypto is not None + try: + self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) + except crypto.Error, exc: + raise SecurityError("Invalid certificate: %r" % (exc, )) + + def has_expired(self): + """Check if the certificate has expired.""" + return self._cert.has_expired() + + def get_serial_number(self): + """Returns the certificates serial number.""" + return self._cert.get_serial_number() + + def get_issuer(self): + """Returns issuer (CA) as a string""" + return ' '.join(x[1] for x in + self._cert.get_issuer().get_components()) + + def get_id(self): + """Serial number/issuer pair uniquely identifies a certificate""" + return "%s %s" % (self.get_issuer(), self.get_serial_number()) + + def verify(self, data, signature, digest): + """Verifies the signature for string containing data.""" + try: + crypto.verify(self._cert, signature, data, digest) + except crypto.Error, exc: + raise 
SecurityError("Bad signature: %r" % (exc, )) + + +class CertStore(object): + """Base class for certificate stores""" + + def __init__(self): + self._certs = {} + + def itercerts(self): + """an iterator over the certificates""" + for c in self._certs.itervalues(): + yield c + + def __getitem__(self, id): + """get certificate by id""" + try: + return self._certs[id] + except KeyError: + raise SecurityError("Unknown certificate: %r" % (id, )) + + def add_cert(self, cert): + if cert.get_id() in self._certs: + raise SecurityError("Duplicate certificate: %r" % (id, )) + self._certs[cert.get_id()] = cert + + +class FSCertStore(CertStore): + """File system certificate store""" + + def __init__(self, path): + CertStore.__init__(self) + if os.path.isdir(path): + path = os.path.join(path, '*') + for p in glob.glob(path): + with open(p) as f: + cert = Certificate(f.read()) + if cert.has_expired(): + raise SecurityError( + "Expired certificate: %r" % (cert.get_id(), )) + self.add_cert(cert) diff --git a/celery/security/key.py b/celery/security/key.py new file mode 100644 --- /dev/null +++ b/celery/security/key.py @@ -0,0 +1,25 @@ +from __future__ import absolute_import + +try: + from OpenSSL import crypto +except ImportError: + crypto = None # noqa + +from ..exceptions import SecurityError + + +class PrivateKey(object): + + def __init__(self, key): + assert crypto is not None + try: + self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) + except crypto.Error, exc: + raise SecurityError("Invalid private key: %r" % (exc, )) + + def sign(self, data, digest): + """sign string containing data.""" + try: + return crypto.sign(self._key, data, digest) + except crypto.Error, exc: + raise SecurityError("Unable to sign data: %r" % (exc, )) diff --git a/celery/security/serialization.py b/celery/security/serialization.py new file mode 100644 --- /dev/null +++ b/celery/security/serialization.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import + +import base64 + +from 
kombu.serialization import registry, encode, decode + +from ..exceptions import SecurityError +from ..utils.encoding import bytes_to_str, str_to_bytes + +from .certificate import Certificate, FSCertStore +from .key import PrivateKey + + +def b64encode(s): + return bytes_to_str(base64.b64encode(str_to_bytes(s))) + + +def b64decode(s): + return base64.b64decode(str_to_bytes(s)) + + +class SecureSerializer(object): + + def __init__(self, key=None, cert=None, cert_store=None, + digest="sha1", serializer="json"): + self._key = key + self._cert = cert + self._cert_store = cert_store + self._digest = digest + self._serializer = serializer + + def serialize(self, data): + """serialize data structure into string""" + assert self._key is not None + assert self._cert is not None + try: + content_type, content_encoding, body = encode( + data, serializer=self._serializer) + # What we sign is the serialized body, not the body itself. + # this way the receiver doesn't have to decode the contents + # to verify the signature (and thus avoiding potential flaws + # in the decoding step). 
+ return self._pack(body, content_type, content_encoding, + signature=self._key.sign(body, self._digest), + signer=self._cert.get_id()) + except Exception, exc: + raise SecurityError("Unable to serialize: %r" % (exc, )) + + def deserialize(self, data): + """deserialize data structure from string""" + assert self._cert_store is not None + try: + payload = self._unpack(data) + signature, signer, body = (payload["signature"], + payload["signer"], + payload["body"]) + self._cert_store[signer].verify(body, + signature, self._digest) + except Exception, exc: + raise SecurityError("Unable to deserialize: %r" % (exc, )) + + return decode(body, payload["content_type"], + payload["content_encoding"], force=True) + + def _pack(self, body, content_type, content_encoding, signer, signature, + sep='\x00\x01'): + return b64encode(sep.join([signer, signature, + content_type, content_encoding, body])) + + def _unpack(self, payload, sep='\x00\x01', + fields=("signer", "signature", "content_type", + "content_encoding", "body")): + return dict(zip(fields, b64decode(payload).split(sep))) + + +def register_auth(key=None, cert=None, store=None, digest="sha1", + serializer="json"): + """register security serializer""" + s = SecureSerializer(key and PrivateKey(key), + cert and Certificate(cert), + store and FSCertStore(store), + digest=digest, serializer=serializer) + registry.register("auth", s.serialize, s.deserialize, + content_type="application/data", + content_encoding="utf-8") diff --git a/celery/task/__init__.py b/celery/task/__init__.py --- a/celery/task/__init__.py +++ b/celery/task/__init__.py @@ -71,11 +71,11 @@ def refresh_feed(url): .. 
code-block:: python @task(exchange="feeds") - def refresh_feed(url, **kwargs): + def refresh_feed(url): try: return Feed.objects.get(url=url).refresh() except socket.error, exc: - refresh_feed.retry(args=[url], kwargs=kwargs, exc=exc) + refresh_feed.retry(exc=exc) Calling the resulting task: diff --git a/celery/task/chords.py b/celery/task/chords.py --- a/celery/task/chords.py +++ b/celery/task/chords.py @@ -12,7 +12,7 @@ from __future__ import absolute_import from .. import current_app -from ..result import TaskSetResult +from ..result import AsyncResult, TaskSetResult from ..utils import uuid from .sets import TaskSet, subtask @@ -20,11 +20,11 @@ @current_app.task(name="celery.chord_unlock", max_retries=None) def _unlock_chord(setid, callback, interval=1, propagate=False, - max_retries=None): - result = TaskSetResult.restore(setid) + max_retries=None, result=None): + result = TaskSetResult(setid, map(AsyncResult, result)) if result.ready(): - subtask(callback).delay(result.join(propagate=propagate)) - result.delete() + j = result.join_native if result.supports_native_join else result.join + subtask(callback).delay(j(propagate=propagate)) else: _unlock_chord.retry(countdown=interval, max_retries=max_retries) @@ -43,10 +43,11 @@ def run(self, set, body, interval=1, max_retries=None, tid = uuid() task.options.update(task_id=tid, chord=body) r.append(current_app.AsyncResult(tid)) - current_app.TaskSetResult(setid, r).save() - self.backend.on_chord_apply(setid, body, interval, + self.backend.on_chord_apply(setid, body, + interval=interval, max_retries=max_retries, - propagate=propagate) + propagate=propagate, + result=r) return set.apply_async(taskset_id=setid) diff --git a/celery/utils/__init__.py b/celery/utils/__init__.py --- a/celery/utils/__init__.py +++ b/celery/utils/__init__.py @@ -71,7 +71,7 @@ def _inner(fun): @wraps(fun) def __inner(*args, **kwargs): - warn_deprecated(description=description or get_full_cls_name(fun), + 
warn_deprecated(description=description or qualname(fun), deprecation=deprecation, removal=removal, alternative=alternative) @@ -169,7 +169,7 @@ def noop(*args, **kwargs): pass -if sys.version_info >= (3, 0): +if sys.version_info >= (2, 6): def kwdict(kwargs): return kwargs @@ -264,10 +264,19 @@ def mattrgetter(*attrs): for attr in attrs) -def get_full_cls_name(cls): - """With a class, get its full module and class name.""" - return ".".join([cls.__module__, - cls.__name__]) +if sys.version_info >= (3, 3): + + def qualname(obj): + return obj.__qualname__ + +else: + + def qualname(obj): # noqa + if not hasattr(obj, "__name__") and hasattr(obj, "__class__"): + return qualname(obj.__class__) + + return '.'.join([obj.__module__, obj.__name__]) +get_full_cls_name = qualname # XXX Compat def fun_takes_kwargs(fun, kwlist=[]): @@ -299,7 +308,8 @@ def fun_takes_kwargs(fun, kwlist=[]): return filter(partial(operator.contains, args), kwlist) -def get_cls_by_name(name, aliases={}, imp=None, package=None, **kwargs): +def get_cls_by_name(name, aliases={}, imp=None, package=None, + sep='.', **kwargs): """Get class by name. The name should be the full dot-separated path to the class:: @@ -311,6 +321,10 @@ def get_cls_by_name(name, aliases={}, imp=None, package=None, **kwargs): celery.concurrency.processes.TaskPool ^- class name + or using ':' to separate module and symbol:: + + celery.concurrency.processes:TaskPool + If `aliases` is provided, a dict containing short name/long name mappings, the name is looked up in the aliases first. 
@@ -336,7 +350,8 @@ def get_cls_by_name(name, aliases={}, imp=None, package=None, **kwargs): return name # already a class name = aliases.get(name) or name - module_name, _, cls_name = name.rpartition(".") + sep = ':' if ':' in name else sep + module_name, _, cls_name = name.rpartition(sep) if not module_name and package: module_name = package try: diff --git a/celery/utils/coroutine.py b/celery/utils/coroutine.py new file mode 100644 --- /dev/null +++ b/celery/utils/coroutine.py @@ -0,0 +1,142 @@ +from __future__ import absolute_import + +from functools import wraps +from Queue import Queue + +from celery.utils import cached_property + + +def coroutine(fun): + """Decorator that turns a generator into a coroutine that is + started automatically, and that can send values back to the caller. + + **Example coroutine that returns values to caller**:: + + @coroutine + def adder(self): + while 1: + x, y = (yield) + self.give(x + y) + + >>> c = adder() + + # call sends value and returns the result. + >>> c.call(4, 4) + 8 + + # or you can send the value and get the result later. 
+ >>> c.send(4, 4) + >>> c.get() + 8 + + + **Example sink (input-only coroutine)**:: + + @coroutine + def uniq(): + seen = set() + while 1: + line = (yield) + if line not in seen: + seen.add(line) + print(line) + + >>> u = uniq() + >>> [u.send(l) for l in [1, 2, 2, 3]] + [1, 2, 3] + + **Example chaining coroutines**:: + + @coroutine + def uniq(callback): + seen = set() + while 1: + line = (yield) + if line not in seen: + callback.send(line) + seen.add(line) + + @coroutine + def uppercaser(callback): + while 1: + line = (yield) + callback.send(str(line).upper()) + + @coroutine + def printer(): + while 1: + line = (yield) + print(line) + + >>> pipe = uniq(uppercaser(printer())) + >>> for line in file("AUTHORS").readlines(): + pipe.send(line) + + """ + @wraps(fun) + def start(*args, **kwargs): + return Coroutine.start_from(fun, *args, **kwargs) + return start + + +class Coroutine(object): + _gen = None + started = False + + def bind(self, generator): + self._gen = generator + + def _next(self): + return self._gen.next() + next = __next__ = _next + + def start(self): + if self.started: + raise ValueError("coroutine already started") + self.next() + self.started = True + return self + + def send1(self, value): + return self._gen.send(value) + + def call1(self, value, timeout=None): + self.send1(value) + return self.get(timeout=timeout) + + def send(self, *args): + return self._gen.send(args) + + def call(self, *args, **opts): + self.send(*args) + return self.get(**opts) + + @classmethod + def start_from(cls, fun, *args, **kwargs): + coro = cls() + coro.bind(fun(coro, *args, **kwargs)) + return coro.start() + + @cached_property + def __output__(self): + return Queue() + + @property + def give(self): + return self.__output__.put_nowait + + @property + def get(self): + return self.__output__.get + +if __name__ == "__main__": + + @coroutine + def adder(self): + while 1: + x, y = (yield) + self.give(x + y) + + x = adder() + for i in xrange(10): + print(x.call(i, i)) diff 
--git a/celery/utils/encoding.py b/celery/utils/encoding.py --- a/celery/utils/encoding.py +++ b/celery/utils/encoding.py @@ -39,6 +39,9 @@ def ensure_bytes(s): return str_to_bytes(s) return s + def default_encode(obj): + return obj + str_t = str bytes_t = bytes @@ -55,6 +58,9 @@ def bytes_to_str(s): # noqa def from_utf8(s, *args, **kwargs): # noqa return s.encode("utf-8", *args, **kwargs) + def default_encode(obj): # noqa + return unicode(obj, default_encoding()) + str_t = unicode bytes_t = str ensure_bytes = str_to_bytes diff --git a/celery/utils/threads.py b/celery/utils/threads.py new file mode 100644 --- /dev/null +++ b/celery/utils/threads.py @@ -0,0 +1,66 @@ +from __future__ import absolute_import + +import os +import sys +import threading + +_Thread = threading.Thread +_Event = threading._Event + + +class Event(_Event): + + if not hasattr(_Event, "is_set"): + is_set = _Event.isSet + + +class Thread(_Thread): + + if not hasattr(_Thread, "is_alive"): + is_alive = _Thread.isAlive + + if not hasattr(_Thread, "daemon"): + daemon = property(_Thread.isDaemon, _Thread.setDaemon) + + if not hasattr(_Thread, "name"): + name = property(_Thread.getName, _Thread.setName) + + +class bgThread(Thread): + + def __init__(self, name=None, **kwargs): + super(bgThread, self).__init__() + self._is_shutdown = Event() + self._is_stopped = Event() + self.daemon = True + self.name = name or self.__class__.__name__ + + def body(self): + raise NotImplementedError("subclass responsibility") + + def on_crash(self, msg, *fmt, **kwargs): + sys.stderr.write((msg + "\n") % fmt) + + def run(self): + shutdown = self._is_shutdown + while not shutdown.is_set(): + try: + self.body() + except Exception, exc: + self.on_crash("%r crashed: %r", self.name, exc, exc_info=True) + # exiting by normal means does not work here, so force exit. 
+ os._exit(1) + try: + self._is_stopped.set() + except TypeError: # pragma: no cover + # we lost the race at interpreter shutdown, + # so gc collected built-in modules. + pass + self._is_stopped.set() + + def stop(self): + """Graceful shutdown.""" + self._is_shutdown.set() + self._is_stopped.wait() + if self.is_alive(): + self.join(1e100) diff --git a/celery/utils/timer2.py b/celery/utils/timer2.py --- a/celery/utils/timer2.py +++ b/celery/utils/timer2.py @@ -112,6 +112,10 @@ def enter(self, entry, eta=None, priority=0): if not self.handle_error(sys.exc_info()): raise + if eta is None: + # schedule now. + eta = time() + heapq.heappush(self._queue, (eta, priority, entry)) return entry diff --git a/celery/utils/timeutils.py b/celery/utils/timeutils.py --- a/celery/utils/timeutils.py +++ b/celery/utils/timeutils.py @@ -11,9 +11,18 @@ """ from __future__ import absolute_import +from kombu.utils import cached_property + from datetime import datetime, timedelta +from dateutil import tz from dateutil.parser import parse as parse_iso8601 +try: + import pytz +except ImportError: + pytz = None # noqa + + DAYNAMES = "sun", "mon", "tue", "wed", "thu", "fri", "sat" WEEKDAYS = dict((name, dow) for name, dow in zip(DAYNAMES, range(7))) @@ -21,6 +30,7 @@ "m": lambda n: n / 60.0, "h": lambda n: n / 60.0 / 60.0} + HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, "total_seconds") TIME_UNITS = (("day", 60 * 60 * 24.0, lambda n: "%.2f" % n), @@ -29,6 +39,47 @@ ("second", 1.0, lambda n: "%.2f" % n)) +class UnknownTimezone(Exception): + """No specification exists for the timezone specified. 
Consider + installing the pytz library to get access to more timezones.""" + + +def _is_naive(dt): + return bool(dt.tzinfo) + + +class _Zone(object): + + def tz_or_local(self, tzinfo=None): + if tzinfo is None: + return self.local + return self.get_timezone(tzinfo) + + def to_local(self, dt, local=None, orig=None): + return dt.replace(tzinfo=orig or self.utc).astimezone( + self.tz_or_local(local)) + + def get_timezone(self, zone): + if isinstance(zone, basestring): + if pytz: + return pytz.timezone(zone) + zone = tz.gettz(zone) + if zone is None: + raise UnknownTimezone(UnknownTimezone.__doc__) + return zone + return zone + + @cached_property + def local(self): + return tz.tzlocal() + + @cached_property + def utc(self): + return self.get_timezone("UTC") + +timezone = _Zone() + + def maybe_timedelta(delta): """Coerces integer to timedelta if `delta` is an integer.""" if isinstance(delta, (int, float)): @@ -45,6 +96,7 @@ def timedelta_seconds(delta): """ return max(delta.total_seconds(), 0) + else: # pragma: no cover def timedelta_seconds(delta): # noqa @@ -91,10 +143,10 @@ def remaining(start, ends_in, now=None, relative=False): calculated using :func:`delta_resolution` (i.e. rounded to the resolution of `ends_in`). :keyword now: Function returning the current time and date, - defaults to :func:`datetime.now`. + defaults to :func:`datetime.utcnow`. 
""" - now = now or datetime.now() + now = now or datetime.utcnow() end_date = start + ends_in if relative: diff --git a/celery/worker/__init__.py b/celery/worker/__init__.py --- a/celery/worker/__init__.py +++ b/celery/worker/__init__.py @@ -120,11 +120,12 @@ def __init__(self, concurrency=None, logfile=None, loglevel=None, pool_putlocks=None, db=None, prefetch_multiplier=None, eta_scheduler_precision=None, disable_rate_limits=None, autoscale=None, autoscaler_cls=None, scheduler_cls=None, - app=None): + queues=None, app=None): self.app = app_or_default(app) conf = self.app.conf self._shutdown_complete = threading.Event() + self.app.select_queues(queues) # select queues subset. # Options self.loglevel = loglevel or self.loglevel @@ -251,7 +252,6 @@ def __init__(self, concurrency=None, logfile=None, loglevel=None, # and they must be stopped in reverse order. self.components = filter(None, (self.pool, self.mediator, - self.scheduler, self.beat, self.autoscaler, self.consumer)) @@ -340,3 +340,7 @@ def on_timer_error(self, exc_info): def on_timer_tick(self, delay): self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay) + + @property + def state(self): + return state diff --git a/celery/worker/autoreload.py b/celery/worker/autoreload.py new file mode 100644 --- /dev/null +++ b/celery/worker/autoreload.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.autoreload + ~~~~~~~~~~~~~~~~~~~~~~~~ + + This module implements automatic module reloading +""" +from __future__ import absolute_import +from __future__ import with_statement + +import os +import sys +import time +import select +import hashlib + +from collections import defaultdict + +from .. 
import current_app + + +def file_hash(filename, algorithm='md5'): + hobj = hashlib.new(algorithm) + with open(filename, 'rb') as f: + for chunk in iter(lambda: f.read(2 ** 20), ''): + hobj.update(chunk) + return hobj.digest() + + +class StatMonitor(object): + """File change monitor based on `stat` system call""" + def __init__(self, files, on_change=None, interval=0.5): + self._files = files + self._interval = interval + self._on_change = on_change + self._modify_times = defaultdict(int) + + def start(self): + while True: + modified = {} + for m in self._files: + mt = self._mtime(m) + if mt is None: + break + if self._modify_times[m] != mt: + modified[m] = mt + else: + if modified: + self.on_change(modified.keys()) + self._modify_times.update(modified) + + time.sleep(self._interval) + + def on_change(self, modified): + if self._on_change: + return self._on_change(modified) + + @classmethod + def _mtime(cls, path): + try: + return os.stat(path).st_mtime + except: + return + + +class KQueueMonitor(object): + """File change monitor based on BSD kernel event notifications""" + def __init__(self, files, on_change=None): + assert hasattr(select, 'kqueue') + self._files = dict([(f, None) for f in files]) + self._on_change = on_change + + def start(self): + try: + self._kq = select.kqueue() + kevents = [] + for f in self._files: + self._files[f] = fd = os.open(f, os.O_RDONLY) + + ev = select.kevent(fd, + filter=select.KQ_FILTER_VNODE, + flags=select.KQ_EV_ADD | + select.KQ_EV_ENABLE | + select.KQ_EV_CLEAR, + fflags=select.KQ_NOTE_WRITE | + select.KQ_NOTE_EXTEND) + kevents.append(ev) + + events = self._kq.control(kevents, 0) + while True: + events = self._kq.control(kevents, 1) + fds = [e.ident for e in events] + modified = [k for k, v in self._files.iteritems() + if v in fds] + self.on_change(modified) + finally: + self.close() + + def close(self): + self._kq.close() + for f in self._files: + if self._files[f] is not None: + os.close(self._files[f]) + self._files[f] = None 
+ + def on_change(self, modified): + if self._on_change: + return self._on_change(modified) + + +try: + import pyinotify +except ImportError: + pyinotify = None # noqa + + +class InotifyMonitor(pyinotify and pyinotify.ProcessEvent or object): + """File change monitor based on Linux kernel `inotify` subsystem""" + def __init__(self, modules, on_change=None): + assert pyinotify + self._modules = modules + self._on_change = on_change + + def start(self): + try: + self._wm = pyinotify.WatchManager() + self._notifier = pyinotify.Notifier(self._wm) + for m in self._modules: + self._wm.add_watch(m, pyinotify.IN_MODIFY) + self._notifier.loop() + finally: + self.close() + + def close(self): + self._notifier.stop() + self._wm.close() + + def process_IN_MODIFY(self, event): + self.on_change(event.pathname) + + def on_change(self, modified): + if self._on_change: + self._on_change(modified) + + +if hasattr(select, 'kqueue'): + _monitor_cls = KQueueMonitor +elif sys.platform.startswith('linux') and pyinotify: + _monitor_cls = InotifyMonitor +else: + _monitor_cls = StatMonitor + + +class AutoReloader(object): + """Tracks changes in modules and fires reload commands""" + def __init__(self, modules, monitor_cls=_monitor_cls, *args, **kwargs): + self._monitor = monitor_cls(modules, self.on_change, *args, **kwargs) + self._hashes = dict([(f, file_hash(f)) for f in modules]) + + def start(self): + self._monitor.start() + + def on_change(self, files): + modified = [] + for f in files: + fhash = file_hash(f) + if fhash != self._hashes[f]: + modified.append(f) + self._hashes[f] = fhash + if modified: + self._reload(map(self._module_name, modified)) + + def _reload(self, modules): + current_app.control.broadcast("pool_restart", + arguments={"imports": modules, "reload_modules": True}) + + @classmethod + def _module_name(cls, path): + return os.path.splitext(os.path.basename(path))[0] diff --git a/celery/worker/autoscale.py b/celery/worker/autoscale.py --- a/celery/worker/autoscale.py +++ 
b/celery/worker/autoscale.py @@ -17,7 +17,6 @@ from __future__ import absolute_import from __future__ import with_statement -import os import sys import threading import traceback @@ -25,13 +24,14 @@ from time import sleep, time from . import state +from ..utils.threads import bgThread -class Autoscaler(threading.Thread): +class Autoscaler(bgThread): def __init__(self, pool, max_concurrency, min_concurrency=0, keepalive=30, logger=None): - threading.Thread.__init__(self) + super(Autoscaler, self).__init__() self.pool = pool self.mutex = threading.Lock() self.max_concurrency = max_concurrency @@ -39,14 +39,10 @@ def __init__(self, pool, max_concurrency, min_concurrency=0, self.keepalive = keepalive self.logger = logger self._last_action = None - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.setDaemon(True) - self.setName(self.__class__.__name__) assert self.keepalive, "can't scale down too fast." - def scale(self): + def body(self): with self.mutex: current = min(self.qty, self.max_concurrency) if current > self.processes: @@ -54,6 +50,8 @@ def scale(self): elif current < self.processes: self.scale_down( (self.processes - current) - self.min_concurrency) + sleep(1.0) + scale = body # XXX compat def update(self, max=None, min=None): with self.mutex: @@ -109,23 +107,6 @@ def scale_down(self, n): self._last_action = time() self._shrink(n) - def run(self): - while not self._is_shutdown.isSet(): - try: - self.scale() - sleep(1.0) - except Exception, exc: - self.logger.error("Thread Autoscaler crashed: %r", exc, - exc_info=sys.exc_info()) - os._exit(1) - self._is_stopped.set() - - def stop(self): - self._is_shutdown.set() - self._is_stopped.wait() - if self.isAlive(): - self.join(1e10) - def info(self): return {"max": self.max_concurrency, "min": self.min_concurrency, diff --git a/celery/worker/buckets.py b/celery/worker/buckets.py --- a/celery/worker/buckets.py +++ b/celery/worker/buckets.py @@ -24,7 +24,8 @@ from time import time, 
sleep from Queue import Queue, Empty -from ..datastructures import TokenBucket +from kombu.utils.limits import TokenBucket + from ..utils import timeutils from ..utils.compat import zip_longest, chain_from_iterable diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py --- a/celery/worker/consumer.py +++ b/celery/worker/consumer.py @@ -76,6 +76,7 @@ from __future__ import absolute_import from __future__ import with_statement +import logging import socket import sys import threading @@ -84,13 +85,13 @@ from ..app import app_or_default from ..datastructures import AttributeDict -from ..exceptions import NotRegistered +from ..exceptions import InvalidTaskError +from ..registry import tasks from ..utils import noop from ..utils import timer2 from ..utils.encoding import safe_repr from . import state -from .job import TaskRequest, InvalidTaskError -from .control.registry import Panel +from .control import Panel from .heartbeat import Heart RUN = 0x1 @@ -293,6 +294,14 @@ def __init__(self, ready_queue, eta_schedule, logger, self.connection_errors = conninfo.connection_errors self.channel_errors = conninfo.channel_errors + self._does_info = self.logger.isEnabledFor(logging.INFO) + self.strategies = {} + + def update_strategies(self): + S = self.strategies + for task in tasks.itervalues(): + S[task.name] = task.start_strategy(self.app, self) + def start(self): """Start the consumer. @@ -341,7 +350,8 @@ def on_task(self, task): if task.revoked(): return - self.logger.info("Got task from broker: %s", task.shortinfo()) + if self._does_info: + self.logger.info("Got task from broker: %s", task.shortinfo()) if self.event_dispatcher.enabled: self.event_dispatcher.send("task-received", uuid=task.task_id, @@ -399,43 +409,26 @@ def receive_message(self, body, message): :param message: The kombu message object. """ - # need to guard against errors occurring while acking the message. 
- def ack(): - try: - message.ack() - except self.connection_errors + (AttributeError, ), exc: - self.logger.critical( - "Couldn't ack %r: %s reason:%r", - message.delivery_tag, - self._message_report(body, message), exc) - try: - body["task"] + name = body["task"] except (KeyError, TypeError): warnings.warn(RuntimeWarning( "Received and deleted unknown message. Wrong destination?!? \ the full contents of the message body was: %s" % ( self._message_report(body, message), ))) - ack() + message.ack_log_error(self.logger, self.connection_errors) return try: - task = TaskRequest.from_message(message, body, ack, - app=self.app, - logger=self.logger, - hostname=self.hostname, - eventer=self.event_dispatcher) - - except NotRegistered, exc: + self.strategies[name](message, body, message.ack_log_error) + except KeyError, exc: self.logger.error(UNKNOWN_TASK_ERROR, exc, safe_repr(body), exc_info=sys.exc_info()) - ack() + message.ack_log_error(self.logger, self.connection_errors) except InvalidTaskError, exc: self.logger.error(INVALID_TASK_ERROR, str(exc), safe_repr(body), exc_info=sys.exc_info()) - ack() - else: - self.on_task(task) + message.ack_log_error(self.logger, self.connection_errors) def maybe_conn_error(self, fun): """Applies function but ignores any connection or channel @@ -603,6 +596,9 @@ def reset_connection(self): # Restart heartbeat thread. self.restart_heartbeat() + # reload all task's execution strategies. + self.update_strategies() + # We're back! self._state = RUN diff --git a/celery/worker/control/builtins.py b/celery/worker/control.py similarity index 88% rename from celery/worker/control/builtins.py rename to celery/worker/control.py --- a/celery/worker/control/builtins.py +++ b/celery/worker/control.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """ - celery.worker.control.builtins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + celery.worker.control + ~~~~~~~~~~~~~~~~~~~~~ - THis module contains the built-in remote control commands. + Remote control commands. 
:copyright: (c) 2009 - 2011 by Ask Solem. :license: BSD, see LICENSE for more details. @@ -15,18 +15,27 @@ from datetime import datetime -from ...platforms import signals as _signals -from ...registry import tasks -from ...utils import timeutils -from ...utils.encoding import safe_repr -from .. import state -from ..state import revoked +from ..platforms import signals as _signals +from ..registry import tasks +from ..utils import timeutils +from ..utils.compat import UserDict +from ..utils.encoding import safe_repr -from .registry import Panel +from . import state +from .state import revoked TASK_INFO_FIELDS = ("exchange", "routing_key", "rate_limit") +class Panel(UserDict): + data = dict() # Global registry. + + @classmethod + def register(cls, method, name=None): + cls.data[name or method.__name__] = method + return method + + @Panel.register def revoke(panel, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id.""" @@ -137,7 +146,7 @@ def dump_schedule(panel, safe=False, **kwargs): return [] formatitem = lambda (i, item): "%s. 
%s pri%s %r" % (i, - datetime.fromtimestamp(item["eta"]), + datetime.utcfromtimestamp(item["eta"]), item["priority"], item["item"]) info = map(formatitem, enumerate(schedule.info())) @@ -229,6 +238,22 @@ def pool_shrink(panel, n=1, **kwargs): return {"ok": "terminated worker processes"} +@Panel.register +def pool_restart(panel, imports=None, reload_imports=False, + reload=reload, **kwargs): + imports = set(imports or []) + for m in imports: + if m not in sys.modules: + panel.app.loader.import_from_cwd(m) + panel.logger.debug("imported %s module" % m) + elif reload_imports: + reload(sys.modules[m]) + panel.logger.debug("reloaded %s module" % m) + + panel.consumer.pool.restart() + return {"ok": "started restarting worker processes"} + + @Panel.register def autoscale(panel, max=None, min=None): autoscaler = panel.consumer.controller.autoscaler diff --git a/celery/worker/control/__init__.py b/celery/worker/control/__init__.py deleted file mode 100644 --- a/celery/worker/control/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.control - ~~~~~~~~~~~~~~~~~~~~~ - - Remote control commands. - See :mod:`celery.worker.control.builtins`. - - :copyright: (c) 2009 - 2011 by Ask Solem. - :license: BSD, see LICENSE for more details. - -""" -from __future__ import absolute_import - -from . import registry - -# Loads the built-in remote control commands -from . import builtins # noqa - -Panel = registry.Panel diff --git a/celery/worker/control/registry.py b/celery/worker/control/registry.py deleted file mode 100644 --- a/celery/worker/control/registry.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.control.registry - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - The registry keeps track of available remote control commands, - and can be used to register new commands. - - :copyright: (c) 2009 - 2011 by Ask Solem. - :license: BSD, see LICENSE for more details. 
- -""" -from __future__ import absolute_import - -from ...utils.compat import UserDict - - -class Panel(UserDict): - data = dict() # Global registry. - - @classmethod - def register(cls, method, name=None): - cls.data[name or method.__name__] = method - return method diff --git a/celery/worker/heartbeat.py b/celery/worker/heartbeat.py --- a/celery/worker/heartbeat.py +++ b/celery/worker/heartbeat.py @@ -31,16 +31,23 @@ def __init__(self, timer, eventer, interval=None): self.interval = interval or 30 self.tref = None + # Make event dispatcher start/stop us when it's + # enabled/disabled. + self.eventer.on_enabled.add(self.start) + self.eventer.on_disabled.add(self.stop) + def _send(self, event): return self.eventer.send(event, **SOFTWARE_INFO) def start(self): - self._send("worker-online") - self.tref = self.timer.apply_interval(self.interval * 1000.0, - self._send, ("worker-heartbeat", )) + if self.eventer.enabled: + self._send("worker-online") + self.tref = self.timer.apply_interval(self.interval * 1000.0, + self._send, ("worker-heartbeat", )) def stop(self): if self.tref is not None: self.timer.cancel(self.tref) self.tref = None - self._send("worker-offline") + if self.eventer.enabled: + self._send("worker-offline") diff --git a/celery/worker/job.py b/celery/worker/job.py --- a/celery/worker/job.py +++ b/celery/worker/job.py @@ -12,162 +12,50 @@ """ from __future__ import absolute_import -import os -import sys +import logging import time import socket -import warnings from datetime import datetime -from .. import current_app from .. import exceptions -from .. import platforms -from .. 
import registry +from ..registry import tasks from ..app import app_or_default -from ..datastructures import ExceptionInfo -from ..execute.trace import TaskTrace -from ..utils import noop, kwdict, fun_takes_kwargs, truncate_text -from ..utils.encoding import safe_repr, safe_str, default_encoding -from ..utils.serialization import get_pickleable_exception -from ..utils.timeutils import maybe_iso8601 +from ..execute.trace import build_tracer, trace_task, report_internal_error +from ..platforms import set_mp_process_title as setps +from ..utils import (noop, kwdict, fun_takes_kwargs, + cached_property, truncate_text) +from ..utils.encoding import safe_repr, safe_str +from ..utils.timeutils import maybe_iso8601, timezone from . import state -#: Keys to keep from the message delivery info. The values -#: of these keys must be pickleable. -WANTED_DELIVERY_INFO = ("exchange", "routing_key", "consumer_tag", ) +# Localize +tz_to_local = timezone.to_local +tz_or_local = timezone.tz_or_local +tz_utc = timezone.utc -class InvalidTaskError(Exception): - """The task has invalid data or is not properly constructed.""" - pass - - -if sys.version_info >= (3, 0): - - def default_encode(obj): - return obj -else: - - def default_encode(obj): # noqa - return unicode(obj, default_encoding()) - - -class WorkerTaskTrace(TaskTrace): - """Wraps the task in a jail, catches all exceptions, and - saves the status and result of the task execution to the task - meta backend. - - If the call was successful, it saves the result to the task result - backend, and sets the task status to `"SUCCESS"`. - - If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts - the original exception, uses that as the result and sets the task status - to `"RETRY"`. - - If the call results in an exception, it saves the exception as the task - result, and sets the task status to `"FAILURE"`. - - :param task_name: The name of the task to execute. - :param task_id: The unique id of the task. 
- :param args: List of positional args to pass on to the function. - :param kwargs: Keyword arguments mapping to pass on to the function. - - :keyword loader: Custom loader to use, if not specified the current app - loader will be used. - :keyword hostname: Custom hostname to use, if not specified the system - hostname will be used. - - :returns: the evaluated functions return value on success, or - the exception instance on failure. - - """ - - #: Current loader. - loader = None - - #: Hostname to report as. - hostname = None - - def __init__(self, *args, **kwargs): - self.loader = kwargs.get("loader") or current_app.loader - self.hostname = kwargs.get("hostname") or socket.gethostname() - super(WorkerTaskTrace, self).__init__(*args, **kwargs) - - self._store_errors = True - if self.task.ignore_result: - self._store_errors = self.task.store_errors_even_if_ignored - self.super = super(WorkerTaskTrace, self) - - def execute_safe(self, *args, **kwargs): - """Same as :meth:`execute`, but catches errors.""" - try: - return self.execute(*args, **kwargs) - except Exception, exc: - _type, _value, _tb = sys.exc_info() - _value = self.task.backend.prepare_exception(exc) - exc_info = ExceptionInfo((_type, _value, _tb)) - warnings.warn("Exception outside body: %s: %s\n%s" % tuple( - map(str, (exc.__class__, exc, exc_info.traceback)))) - return exc_info - - def execute(self): - """Execute, trace and store the result of the task.""" - self.loader.on_task_init(self.task_id, self.task) - if self.task.track_started: - if not self.task.ignore_result: - self.task.backend.mark_as_started(self.task_id, - pid=os.getpid(), - hostname=self.hostname) - try: - return super(WorkerTaskTrace, self).execute() - finally: - try: - self.task.backend.process_cleanup() - self.loader.on_process_cleanup() - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except Exception, exc: - logger = current_app.log.get_default_logger() - logger.error("Process cleanup failed: %r", exc, - 
exc_info=sys.exc_info()) - - def handle_success(self, retval, *args): - """Handle successful execution.""" - if not self.task.ignore_result: - self.task.backend.mark_as_done(self.task_id, retval) - return self.super.handle_success(retval, *args) - - def handle_retry(self, exc, type_, tb, strtb): - """Handle retry exception.""" - message, orig_exc = exc.args - if self._store_errors: - self.task.backend.mark_as_retry(self.task_id, orig_exc, strtb) - return self.super.handle_retry(exc, type_, tb, strtb) - - def handle_failure(self, exc, type_, tb, strtb): - """Handle exception.""" - if self._store_errors: - self.task.backend.mark_as_failure(self.task_id, exc, strtb) - exc = get_pickleable_exception(exc) - return self.super.handle_failure(exc, type_, tb, strtb) - - -def execute_and_trace(task_name, *args, **kwargs): +def execute_and_trace(name, uuid, args, kwargs, request=None, **opts): """This is a pickleable method used as a target when applying to pools. It's the same as:: - >>> WorkerTaskTrace(task_name, *args, **kwargs).execute_safe() + >>> trace_task(name, *args, **kwargs)[0] """ - hostname = kwargs.get("hostname") - platforms.set_mp_process_title("celeryd", task_name, hostname=hostname) + task = tasks[name] try: - return WorkerTaskTrace(task_name, *args, **kwargs).execute_safe() - finally: - platforms.set_mp_process_title("celeryd", "-idle-", hostname) + hostname = opts.get("hostname") + setps("celeryd", name, hostname, rate_limit=True) + try: + if task.__tracer__ is None: + task.__tracer__ = build_tracer(name, task, **opts) + return task.__tracer__(uuid, args, kwargs, request)[0] + finally: + setps("celeryd", "-idle-", hostname, rate_limit=True) + except Exception, exc: + return report_internal_error(task, exc) class TaskRequest(object): @@ -176,14 +64,14 @@ class TaskRequest(object): #: Kind of task. Must be a name registered in the task registry. name = None - #: The task class (set by constructor using :attr:`task_name`). 
+ #: The task class (set by constructor using :attr:`name`). task = None #: UUID of the task. - task_id = None + id = None #: UUID of the taskset that this task belongs to. - taskset_id = None + taskset = None #: List of positional arguments to apply to the task. args = None @@ -238,68 +126,83 @@ class TaskRequest(object): _already_revoked = False _terminate_on_ack = None - def __init__(self, task_name, task_id, args, kwargs, - on_ack=noop, retries=0, delivery_info=None, hostname=None, + def __init__(self, task, id, args=[], kwargs={}, + on_ack=noop, retries=0, delivery_info={}, hostname=None, logger=None, eventer=None, eta=None, expires=None, app=None, - taskset_id=None, chord=None, **opts): - self.app = app_or_default(app) - self.task_name = task_name - self.task_id = task_id - self.taskset_id = taskset_id - self.retries = retries + taskset=None, chord=None, utc=False, connection_errors=None, + **opts): + try: + kwargs.items + except AttributeError: + raise exceptions.InvalidTaskError( + "Task keyword arguments is not a mapping") + self.app = app or app_or_default(app) + self.name = task + self.id = id self.args = args - self.kwargs = kwargs - self.eta = eta - self.expires = expires + self.kwargs = kwdict(kwargs) + self.taskset = taskset + self.retries = retries self.chord = chord self.on_ack = on_ack self.delivery_info = {} if delivery_info is None else delivery_info self.hostname = hostname or socket.gethostname() self.logger = logger or self.app.log.get_default_logger() self.eventer = eventer + self.connection_errors = connection_errors or () + + task = self.task = tasks[task] + + # timezone means the message is timezone-aware, and the only timezone + # supported at this point is UTC. 
+ if eta is not None: + tz = tz_utc if utc else self.tzlocal + self.eta = tz_to_local(maybe_iso8601(eta), self.tzlocal, tz) + if expires is not None: + tz = tz_utc if utc else self.tzlocal + self.expires = tz_to_local(maybe_iso8601(expires), + self.tzlocal, tz) - self.task = registry.tasks[self.task_name] - self._store_errors = True - if self.task.ignore_result: - self._store_errors = self.task.store_errors_even_if_ignored + # shortcuts + self._does_debug = self.logger.isEnabledFor(logging.DEBUG) + self._does_info = self.logger.isEnabledFor(logging.INFO) + + self.request_dict = {"hostname": self.hostname, + "id": id, "taskset": taskset, + "retries": retries, "is_eager": False, + "delivery_info": delivery_info, "chord": chord} @classmethod - def from_message(cls, message, body, on_ack=noop, **kw): + def from_message(cls, message, body, on_ack=noop, delivery_info={}, + logger=None, hostname=None, eventer=None, app=None, + connection_errors=None): """Create request from a task message. :raises UnknownTaskError: if the message does not describe a task, the message is also rejected. 
""" - delivery_info = getattr(message, "delivery_info", {}) - delivery_info = dict((key, delivery_info.get(key)) - for key in WANTED_DELIVERY_INFO) + try: + D = message.delivery_info + delivery_info = {"exchange": D.get("exchange"), + "routing_key": D.get("routing_key")} + except (AttributeError, KeyError): + pass - kwargs = body.get("kwargs", {}) - if not hasattr(kwargs, "items"): - raise InvalidTaskError("Task keyword arguments is not a mapping.") try: - task_name = body["task"] - task_id = body["id"] - except KeyError, exc: - raise InvalidTaskError( - "Task message is missing required field %r" % (exc, )) - - return cls(task_name=task_name, - task_id=task_id, - taskset_id=body.get("taskset", None), - args=body.get("args", []), - kwargs=kwdict(kwargs), - chord=body.get("chord"), - retries=body.get("retries", 0), - eta=maybe_iso8601(body.get("eta")), - expires=maybe_iso8601(body.get("expires")), - on_ack=on_ack, delivery_info=delivery_info, **kw) + return cls(on_ack=on_ack, logger=logger, eventer=eventer, app=app, + delivery_info=delivery_info, hostname=hostname, + connection_errors=connection_errors, **body) + except TypeError: + for f in ("task", "id"): + if f not in body: + raise exceptions.InvalidTaskError( + "Task message is missing required field %r" % (f, )) def get_instance_attrs(self, loglevel, logfile): return {"logfile": logfile, "loglevel": loglevel, "hostname": self.hostname, - "id": self.task_id, "taskset": self.taskset_id, + "id": self.id, "taskset": self.taskset, "retries": self.retries, "is_eager": False, "delivery_info": self.delivery_info, "chord": self.chord} @@ -315,13 +218,11 @@ def extend_with_default_kwargs(self, loglevel, logfile): in version 3.0. 
""" - if not self.task.accept_magic_kwargs: - return self.kwargs kwargs = dict(self.kwargs) default_kwargs = {"logfile": logfile, "loglevel": loglevel, - "task_id": self.task_id, - "task_name": self.task_name, + "task_id": self.id, + "task_name": self.name, "task_retries": self.retries, "task_is_eager": False, "delivery_info": self.delivery_info} @@ -344,13 +245,16 @@ def execute_using_pool(self, pool, loglevel=None, logfile=None): """ if self.revoked(): return + request = self.request_dict - args = self._get_tracer_args(loglevel, logfile) - instance_attrs = self.get_instance_attrs(loglevel, logfile) + kwargs = self.kwargs + if self.task.accept_magic_kwargs: + kwargs = self.extend_with_default_kwargs(loglevel, logfile) + request.update({"loglevel": loglevel, "logfile": logfile}) result = pool.apply_async(execute_and_trace, - args=args, + args=(self.name, self.id, self.args, kwargs), kwargs={"hostname": self.hostname, - "request": instance_attrs}, + "request": request}, accept_callback=self.on_accepted, timeout_callback=self.on_timeout, callback=self.on_success, @@ -360,7 +264,7 @@ def execute_using_pool(self, pool, loglevel=None, logfile=None): return result def execute(self, loglevel=None, logfile=None): - """Execute the task in a :class:`WorkerTaskTrace`. + """Execute the task in a :func:`~celery.execute.trace.trace_task`. :keyword loglevel: The loglevel used by the task. 
@@ -375,20 +279,19 @@ def execute(self, loglevel=None, logfile=None): self.acknowledge() instance_attrs = self.get_instance_attrs(loglevel, logfile) - tracer = WorkerTaskTrace(*self._get_tracer_args(loglevel, logfile), - **{"hostname": self.hostname, - "loader": self.app.loader, - "request": instance_attrs}) - retval = tracer.execute() + retval, _ = trace_task(*self._get_tracer_args(loglevel, logfile, True), + **{"hostname": self.hostname, + "loader": self.app.loader, + "request": instance_attrs}) self.acknowledge() return retval def maybe_expire(self): """If expired, mark the task as revoked.""" - if self.expires and datetime.now() > self.expires: - state.revoked.add(self.task_id) - if self._store_errors: - self.task.backend.mark_as_revoked(self.task_id) + if self.expires and datetime.now(self.tzlocal) > self.expires: + state.revoked.add(self.id) + if self.store_errors: + self.task.backend.mark_as_revoked(self.id) def terminate(self, pool, signal=None): if self.time_start: @@ -402,10 +305,10 @@ def revoked(self): return True if self.expires: self.maybe_expire() - if self.task_id in state.revoked: + if self.id in state.revoked: self.logger.warn("Skipping revoked task: %s[%s]", - self.task_name, self.task_id) - self.send_event("task-revoked", uuid=self.task_id) + self.name, self.id) + self.send_event("task-revoked", uuid=self.id) self.acknowledge() self._already_revoked = True return True @@ -422,9 +325,10 @@ def on_accepted(self, pid, time_accepted): state.task_accepted(self) if not self.task.acks_late: self.acknowledge() - self.send_event("task-started", uuid=self.task_id, pid=pid) - self.logger.debug("Task accepted: %s[%s] pid:%r", - self.task_name, self.task_id, pid) + self.send_event("task-started", uuid=self.id, pid=pid) + if self._does_debug: + self.logger.debug("Task accepted: %s[%s] pid:%r", + self.name, self.id, pid) if self._terminate_on_ack is not None: _, pool, signal = self._terminate_on_ack self.terminate(pool, signal) @@ -434,15 +338,15 @@ def 
on_timeout(self, soft, timeout): state.task_ready(self) if soft: self.logger.warning("Soft time limit (%ss) exceeded for %s[%s]", - timeout, self.task_name, self.task_id) + timeout, self.name, self.id) exc = exceptions.SoftTimeLimitExceeded(timeout) else: self.logger.error("Hard time limit (%ss) exceeded for %s[%s]", - timeout, self.task_name, self.task_id) + timeout, self.name, self.id) exc = exceptions.TimeLimitExceeded(timeout) - if self._store_errors: - self.task.backend.mark_as_failure(self.task_id, exc) + if self.store_errors: + self.task.backend.mark_as_failure(self.id, exc) def on_success(self, ret_value): """Handler called if the task was successfully processed.""" @@ -452,26 +356,28 @@ def on_success(self, ret_value): self.acknowledge() runtime = self.time_start and (time.time() - self.time_start) or 0 - self.send_event("task-succeeded", uuid=self.task_id, + self.send_event("task-succeeded", uuid=self.id, result=safe_repr(ret_value), runtime=runtime) - self.logger.info(self.success_msg.strip(), - {"id": self.task_id, - "name": self.task_name, - "return_value": self.repr_result(ret_value), - "runtime": runtime}) + if self._does_info: + self.logger.info(self.success_msg.strip(), + {"id": self.id, + "name": self.name, + "return_value": self.repr_result(ret_value), + "runtime": runtime}) def on_retry(self, exc_info): """Handler called if the task should be retried.""" - self.send_event("task-retried", uuid=self.task_id, + self.send_event("task-retried", uuid=self.id, exception=safe_repr(exc_info.exception.exc), traceback=safe_str(exc_info.traceback)) - self.logger.info(self.retry_msg.strip(), - {"id": self.task_id, - "name": self.task_name, - "exc": safe_repr(exc_info.exception.exc)}, - exc_info=exc_info) + if self._does_info: + self.logger.info(self.retry_msg.strip(), + {"id": self.id, + "name": self.name, + "exc": safe_repr(exc_info.exception.exc)}, + exc_info=exc_info) def on_failure(self, exc_info): """Handler called if the task raised an exception.""" @@ 
-486,16 +392,16 @@ def on_failure(self, exc_info): # This is a special case as the process would not have had # time to write the result. if isinstance(exc_info.exception, exceptions.WorkerLostError) and \ - self._store_errors: - self.task.backend.mark_as_failure(self.task_id, exc_info.exception) + self.store_errors: + self.task.backend.mark_as_failure(self.id, exc_info.exception) - self.send_event("task-failed", uuid=self.task_id, + self.send_event("task-failed", uuid=self.id, exception=safe_repr(exc_info.exception), traceback=safe_str(exc_info.traceback)) context = {"hostname": self.hostname, - "id": self.task_id, - "name": self.task_name, + "id": self.id, + "name": self.name, "exc": safe_repr(exc_info.exception), "traceback": safe_str(exc_info.traceback), "args": safe_repr(self.args), @@ -503,17 +409,17 @@ def on_failure(self, exc_info): self.logger.error(self.error_msg.strip(), context, exc_info=exc_info.exc_info, - extra={"data": {"id": self.task_id, - "name": self.task_name, + extra={"data": {"id": self.id, + "name": self.name, "hostname": self.hostname}}) - task_obj = registry.tasks.get(self.task_name, object) + task_obj = tasks.get(self.name, object) task_obj.send_error_email(context, exc_info.exception) def acknowledge(self): """Acknowledge task.""" if not self.acknowledged: - self.on_ack() + self.on_ack(self.logger, self.connection_errors) self.acknowledged = True def repr_result(self, result, maxlen=46): @@ -522,8 +428,8 @@ def repr_result(self, result, maxlen=46): return truncate_text(safe_repr(result), maxlen) def info(self, safe=False): - return {"id": self.task_id, - "name": self.task_name, + return {"id": self.id, + "name": self.name, "args": self.args if safe else safe_repr(self.args), "kwargs": self.kwargs if safe else safe_repr(self.kwargs), "hostname": self.hostname, @@ -534,8 +440,7 @@ def info(self, safe=False): def shortinfo(self): return "%s[%s]%s%s" % ( - self.task_name, - self.task_id, + self.name, self.id, " eta:[%s]" % (self.eta, ) if 
self.eta else "", " expires:[%s]" % (self.expires, ) if self.expires else "") __str__ = shortinfo @@ -543,9 +448,43 @@ def shortinfo(self): def __repr__(self): return '<%s: {name:"%s", id:"%s", args:"%s", kwargs:"%s"}>' % ( self.__class__.__name__, - self.task_name, self.task_id, self.args, self.kwargs) + self.name, self.id, self.args, self.kwargs) + + def _get_tracer_args(self, loglevel=None, logfile=None, use_real=False): + """Get the task trace args for this task.""" + kwargs = self.kwargs + if self.task.accept_magic_kwargs: + kwargs = self.extend_with_default_kwargs(loglevel, logfile) + first = self.task if use_real else self.name + return first, self.id, self.args, kwargs + + @cached_property + def tzlocal(self): + return tz_or_local(self.app.conf.CELERY_TIMEZONE) + + @property + def store_errors(self): + return (not self.task.ignore_result + or self.task.store_errors_even_if_ignored) + + def _compat_get_task_id(self): + return self.id + + def _compat_set_task_id(self, value): + self.id = value + + def _compat_get_task_name(self): + return self.name + + def _compat_set_task_name(self, value): + self.name = value + + def _compat_get_taskset_id(self): + return self.taskset + + def _compat_set_taskset_id(self, value): + self.taskset = value - def _get_tracer_args(self, loglevel=None, logfile=None): - """Get the :class:`WorkerTaskTrace` tracer for this task.""" - task_func_kwargs = self.extend_with_default_kwargs(loglevel, logfile) - return self.task_name, self.task_id, self.args, task_func_kwargs + task_id = property(_compat_get_task_id, _compat_set_task_id) + task_name = property(_compat_get_task_name, _compat_set_task_name) + taskset_id = property(_compat_get_taskset_id, _compat_set_taskset_id) diff --git a/celery/worker/mediator.py b/celery/worker/mediator.py --- a/celery/worker/mediator.py +++ b/celery/worker/mediator.py @@ -18,17 +18,17 @@ """ from __future__ import absolute_import -import os +import logging import sys -import threading import traceback from 
Queue import Empty from ..app import app_or_default +from ..utils.threads import bgThread -class Mediator(threading.Thread): +class Mediator(bgThread): #: The task queue, a :class:`~Queue.Queue` instance. ready_queue = None @@ -37,17 +37,14 @@ class Mediator(threading.Thread): callback = None def __init__(self, ready_queue, callback, logger=None, app=None): - threading.Thread.__init__(self) self.app = app_or_default(app) self.logger = logger or self.app.log.get_default_logger() self.ready_queue = ready_queue self.callback = callback - self._is_shutdown = threading.Event() - self._is_stopped = threading.Event() - self.setDaemon(True) - self.setName(self.__class__.__name__) + self._does_debug = self.logger.isEnabledFor(logging.DEBUG) + super(Mediator, self).__init__() - def move(self): + def body(self): try: task = self.ready_queue.get(timeout=1.0) except Empty: @@ -56,9 +53,10 @@ def move(self): if task.revoked(): return - self.logger.debug( - "Mediator: Running callback for task: %s[%s]" % ( - task.task_name, task.task_id)) + if self._does_debug: + self.logger.debug( + "Mediator: Running callback for task: %s[%s]" % ( + task.task_name, task.task_id)) try: self.callback(task) @@ -69,20 +67,4 @@ def move(self): extra={"data": {"id": task.task_id, "name": task.task_name, "hostname": task.hostname}}) - - def run(self): - """Move tasks until :meth:`stop` is called.""" - while not self._is_shutdown.isSet(): - try: - self.move() - except Exception, exc: - self.logger.error("Mediator crash: %r", exc, exc_info=True) - # exiting by normal means does not work here, so force exit. - os._exit(1) - self._is_stopped.set() - - def stop(self): - """Gracefully shutdown the thread.""" - self._is_shutdown.set() - self._is_stopped.wait() - self.join(1e10) + move = body # XXX compat diff --git a/celery/worker/state.py b/celery/worker/state.py --- a/celery/worker/state.py +++ b/celery/worker/state.py @@ -48,10 +48,8 @@ #: the list of currently revoked tasks. Persistent if statedb set. 
revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) - -def task_reserved(request): - """Updates global state when a task has been reserved.""" - reserved_requests.add(request) +#: Updates global state when a task has been reserved. +task_reserved = reserved_requests.add def task_accepted(request): diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py new file mode 100644 --- /dev/null +++ b/celery/worker/strategy.py @@ -0,0 +1,19 @@ +from __future__ import absolute_import + +from .job import TaskRequest + + +def default(task, app, consumer): + logger = consumer.logger + hostname = consumer.hostname + eventer = consumer.event_dispatcher + Request = TaskRequest.from_message + handle = consumer.on_task + connection_errors = consumer.connection_errors + + def task_message_handler(M, B, A): + handle(Request(M, B, A, app=app, logger=logger, + hostname=hostname, eventer=eventer, + connection_errors=connection_errors)) + + return task_message_handler diff --git a/contrib/bundles/generate.py b/contrib/bundles/generate.py new file mode 100644 --- /dev/null +++ b/contrib/bundles/generate.py @@ -0,0 +1,34 @@ +import os +import sys + +sys.path.insert(0, os.path.abspath( + os.path.join(__file__, os.pardir, os.pardir))) + +from celery import VERSION + +from bundle import Bundle + +series = "{}.{}".format(*VERSION[:2]) + +defaults = {"version": series, + "author": "Celery Project", + "author_email": "bundles@celeryproject.org", + "url": "http://celeryproject.org", + "license": "BSD"} + + +bundles = [ + Bundle("celery-with-redis", + "Bundle that installs the dependencies for Celery and Redis", + requires=["celery>=%s,<3.0" % (series, ), "redis>=2.4.4"], + **defaults), +] + + +def main(): + for bundle in bundles: + bundle.bump_if_exists() + print(bundle.render_readme()) + +if __name__ == "__main__": + main() diff --git a/contrib/release/bump_version.py b/contrib/release/bump_version.py new file mode 100755 --- /dev/null +++ b/contrib/release/bump_version.py 
@@ -0,0 +1,180 @@ +#!/usr/bin/env python + +from __future__ import absolute_import +from __future__ import with_statement + +import errno +import os +import re +import shlex +import subprocess +import sys + +from contextlib import contextmanager +from tempfile import NamedTemporaryFile + +rq = lambda s: s.strip("\"'") + + +def cmd(*args): + return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0] + + +@contextmanager +def no_enoent(): + try: + yield + except OSError, exc: + if exc.errno != errno.ENOENT: + raise + + +class StringVersion(object): + + def decode(self, s): + s = rq(s) + text = "" + major, minor, release = s.split(".") + if not release.isdigit(): + pos = release.index(re.split("\d+", release)[1][0]) + release, text = release[:pos], release[pos:] + return int(major), int(minor), int(release), text + + def encode(self, v): + return ".".join(map(str, v[:3])) + v[3] +to_str = StringVersion().encode +from_str = StringVersion().decode + + +class TupleVersion(object): + + def decode(self, s): + v = list(map(rq, s.split(", "))) + return (tuple(map(int, v[0:3])) + + tuple(["".join(v[3:])])) + + def encode(self, v): + v = list(v) + + def quote(lit): + if isinstance(lit, basestring): + return '"%s"' % (lit, ) + return str(lit) + + if not v[-1]: + v.pop() + return ", ".join(map(quote, v)) + + +class VersionFile(object): + + def __init__(self, filename): + self.filename = filename + self._kept = None + + def _as_orig(self, version): + return self.wb % {"version": self.type.encode(version), + "kept": self._kept} + + def write(self, version): + pattern = self.regex + with no_enoent(): + with NamedTemporaryFile() as dest: + with open(self.filename) as orig: + for line in orig: + if pattern.match(line): + dest.write(self._as_orig(version)) + else: + dest.write(line) + os.rename(dest.name, self.filename) + + def parse(self): + pattern = self.regex + gpos = 0 + with open(self.filename) as fh: + for line in fh: + m = pattern.match(line) + if m: + if 
"?P<keep>" in pattern.pattern: + self._kept, gpos = m.groupdict()["keep"], 1 + return self.type.decode(m.groups()[gpos]) + + +class PyVersion(VersionFile): + regex = re.compile(r'^VERSION\s*=\s*\((.+?)\)') + wb = "VERSION = (%(version)s)\n" + type = TupleVersion() + + +class SphinxVersion(VersionFile): + regex = re.compile(r'^:[Vv]ersion:\s*(.+?)$') + wb = ':Version: %(version)s\n' + type = StringVersion() + + +class CPPVersion(VersionFile): + regex = re.compile(r'^\#\s*define\s*(?P<keep>\w*)VERSION\s+(.+)') + wb = '#define %(kept)sVERSION "%(version)s"\n' + type = StringVersion() + + +_filetype_to_type = {"py": PyVersion, + "rst": SphinxVersion, + "txt": SphinxVersion, + "c": CPPVersion, + "h": CPPVersion} + +def filetype_to_type(filename): + _, _, suffix = filename.rpartition(".") + return _filetype_to_type[suffix](filename) + + +def bump(*files, **kwargs): + version = kwargs.get("version") + before_commit = kwargs.get("before_commit") + files = [filetype_to_type(f) for f in files] + versions = [v.parse() for v in files] + current = list(reversed(sorted(versions)))[0] # find highest + + if version: + next = from_str(version) + else: + major, minor, release, text = current + if text: + raise Exception("Can't bump alpha releases") + next = (major, minor, release + 1, text) + + print("Bump version from %s -> %s" % (to_str(current), to_str(next))) + + for v in files: + print(" writing %r..." 
% (v.filename, )) + v.write(next) + + if before_commit: + cmd(*shlex.split(before_commit)) + + print(cmd("git", "commit", "-m", "Bumps version to %s" % (to_str(next), ), + *[f.filename for f in files])) + print(cmd("git", "tag", "v%s" % (to_str(next), ))) + + +def main(argv=sys.argv, version=None, before_commit=None): + if not len(argv) > 1: + print("Usage: distdir [docfile] -- <custom version>") + sys.exit(0) + + args = [] + for arg in argv: + if arg.startswith("--before-commit="): + _, before_commit = arg.split('=') + else: + args.append(arg) + + if "--" in args: + c = args.index('--') + version = args[c + 1] + argv = args[:c] + bump(*args[1:], version=version, before_commit=before_commit) + +if __name__ == "__main__": + main() diff --git a/funtests/bench/worker.py b/funtests/bench/worker.py deleted file mode 100644 --- a/funtests/bench/worker.py +++ /dev/null @@ -1,42 +0,0 @@ -import time - -from celery import Celery - -celery = Celery() -celery.conf.update(BROKER_TRANSPORT="memory", - BROKER_POOL_LIMIT=1, - CELERY_PREFETCH_MULTIPLIER=0, - CELERY_DISABLE_RATE_LIMITS=True, - CELERY_BACKEND=None) - - -def bench_consumer(n=10000): - from celery.worker import WorkController - from celery.worker import state - - worker = WorkController(app=celery, pool_cls="solo") - time_start = [None] - - @celery.task() - def it(i): - if not i: - time_start[0] = time.time() - elif i == n - 1: - print(time.time() - time_start[0]) - - @celery.task() - def shutdown_worker(): - raise SystemExit() - - for i in xrange(n): - it.delay(i) - shutdown_worker.delay() - - try: - worker.start() - except SystemExit: - assert sum(state.total_count.values()) == n + 1 - - -if __name__ == "__main__": - bench_consumer() diff --git a/funtests/benchmarks/bench_worker.py b/funtests/benchmarks/bench_worker.py new file mode 100644 --- /dev/null +++ b/funtests/benchmarks/bench_worker.py @@ -0,0 +1,106 @@ +import os +import sys +import time + +os.environ["NOSETPS"] = "yes" + +import anyjson +JSONIMP = 
os.environ.get("JSONIMP") +if JSONIMP: + anyjson.force_implementation(JSONIMP) + +print("anyjson implementation: %r" % (anyjson.implementation.name, )) + +from celery import Celery + +DEFAULT_ITS = 20000 + +celery = Celery(__name__) +celery.conf.update(BROKER_TRANSPORT="librabbitmq", + BROKER_POOL_LIMIT=10, + CELERYD_POOL="solo", + CELERY_PREFETCH_MULTIPLIER=0, + CELERY_DISABLE_RATE_LIMITS=True, + CELERY_DEFAULT_DELIVERY_MODE=1, + CELERY_QUEUES = { + "bench.worker": { + "exchange": "bench.worker", + "routing_key": "bench.worker", + "no_ack": True, + "exchange_durable": False, + "queue_durable": False, + } + }, + CELERY_TASK_SERIALIZER="json", + CELERY_DEFAULT_QUEUE="bench.worker", + CELERY_BACKEND=None, + )#CELERY_MESSAGE_COMPRESSION="zlib") + + +def tdiff(then): + return time.time() - then + + +@celery.task(cur=0, time_start=None, queue="bench.worker") +def it(_, n): + i = it.cur # use internal counter, as ordering can be skewed + # by previous runs, or the broker. + if i and not i % 5000: + print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt)) + it.subt = time.time() + if not i: + it.subt = it.time_start = time.time() + elif i == n - 1: + total = tdiff(it.time_start) + print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt)) + print("-- process %s tasks: %ss total, %s tasks/s} " % ( + n, total, n / (total + .0))) + sys.exit() + it.cur += 1 + + +def bench_apply(n=DEFAULT_ITS): + time_start = time.time() + celery.TaskSet(it.subtask((i, n)) for i in xrange(n)).apply_async() + print("-- apply %s tasks: %ss" % (n, time.time() - time_start, )) + + +def bench_work(n=DEFAULT_ITS, loglevel=None): + loglevel = os.environ.get("BENCH_LOGLEVEL") or loglevel + if loglevel: + celery.log.setup_logging_subsystem(loglevel=loglevel) + worker = celery.WorkController(concurrency=15, + queues=["bench.worker"]) + + try: + print("STARTING WORKER") + worker.start() + except SystemExit: + assert sum(worker.state.total_count.values()) == n + 1 + + +def 
bench_both(n=DEFAULT_ITS): + bench_apply(n) + bench_work(n) + + +def main(argv=sys.argv): + n = DEFAULT_ITS + if len(argv) < 2: + print("Usage: %s [apply|work|both] [n=20k]" % ( + os.path.basename(argv[0]), )) + return sys.exit(1) + try: + try: + n = int(argv[2]) + except IndexError: + pass + return {"apply": bench_apply, + "work": bench_work, + "both": bench_both}[argv[1]](n=n) + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + main() diff --git a/pavement.py b/pavement.py --- a/pavement.py +++ b/pavement.py @@ -126,8 +126,9 @@ def readme(options): @task def bump(options): - sh("bump -c celery") - + sh("contrib/release/bump_version.py \ + celery/__init__.py docs/includes/introduction.txt \ + --before-commit='paver readme'") @task @cmdopts([ diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -60,10 +60,41 @@ # -*- Distribution Meta -*- -os.environ["CELERY_NO_EVAL"] = "yes" -import celery as distmeta -os.environ.pop("CELERY_NO_EVAL", None) -sys.modules.pop("celery", None) +import re +re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') +re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)') +re_doc = re.compile(r'^"""(.+?)"""') +rq = lambda s: s.strip("\"'") + +def add_default(m): + attr_name, attr_value = m.groups() + return ((attr_name, rq(attr_value)), ) + + +def add_version(m): + v = list(map(rq, m.groups()[0].split(", "))) + return (("VERSION", ".".join(v[0:3]) + "".join(v[3:])), ) + + +def add_doc(m): + return (("doc", m.groups()[0]), ) + +pats = {re_meta: add_default, + re_vers: add_version, + re_doc: add_doc} +here = os.path.abspath(os.path.dirname(__file__)) +meta_fh = open(os.path.join(here, "celery/__init__.py")) +try: + meta = {} + for line in meta_fh: + if line.strip() == '# -eof meta-': + break + for pattern, handler in pats.items(): + m = pattern.match(line.strip()) + if m: + meta.update(handler(m)) +finally: + meta_fh.close() # -*- Custom Commands -*- @@ -139,11 +170,11 @@ def run(self, *args, **kwargs): setup( name="celery", - 
version=distmeta.__version__, - description=distmeta.__doc__, - author=distmeta.__author__, - author_email=distmeta.__contact__, - url=distmeta.__homepage__, + version=meta["VERSION"], + description=meta["doc"], + author=meta["author"], + author_email=meta["contact"], + url=meta["homepage"], platforms=["any"], license="BSD", packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']), </patch>
diff --git a/celery/tests/config.py b/celery/tests/config.py --- a/celery/tests/config.py +++ b/celery/tests/config.py @@ -28,3 +28,17 @@ CELERY_REDIS_PORT = int(os.environ.get("REDIS_PORT") or 6379) CELERY_REDIS_DB = os.environ.get("REDIS_DB") or 0 CELERY_REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD") + +# Mongo results tests (only executed if installed and running) +CELERY_MONGODB_BACKEND_SETTINGS = { + "host": os.environ.get("MONGO_HOST") or "localhost", + "port": os.environ.get("MONGO_PORT") or 27017, + "database": os.environ.get("MONGO_DB") or "celery_unittests", + "taskmeta_collection": os.environ.get("MONGO_TASKMETA_COLLECTION") or + "taskmeta_collection", +} +if os.environ.get("MONGO_USER"): + CELERY_MONGODB_BACKEND_SETTINGS["user"] = os.environ.get("MONGO_USER") +if os.environ.get("MONGO_PASSWORD"): + CELERY_MONGODB_BACKEND_SETTINGS["password"] = \ + os.environ.get("MONGO_PASSWORD") diff --git a/celery/tests/functional/case.py b/celery/tests/functional/case.py --- a/celery/tests/functional/case.py +++ b/celery/tests/functional/case.py @@ -13,7 +13,7 @@ from celery.exceptions import TimeoutError from celery.task.control import ping, flatten_reply, inspect -from celery.utils import get_full_cls_name +from celery.utils import qualname from celery.tests.utils import unittest @@ -84,7 +84,7 @@ def ensure_started(self): def managed(cls, hostname=None, caller=None): hostname = hostname or socket.gethostname() if caller: - hostname = ".".join([get_full_cls_name(caller), hostname]) + hostname = ".".join([qualname(caller), hostname]) else: hostname += str(cls.next_worker_id()) worker = cls(hostname) diff --git a/celery/tests/test_app/__init__.py b/celery/tests/test_app/__init__.py --- a/celery/tests/test_app/__init__.py +++ b/celery/tests/test_app/__init__.py @@ -230,13 +230,14 @@ def send(self, type, **fields): assert conn.transport_cls == "memory" pub = self.app.amqp.TaskPublisher(conn, exchange="foo_exchange") - self.assertIn("foo_exchange", 
amqp._exchanges_declared) + self.assertNotIn("foo_exchange", amqp._exchanges_declared) dispatcher = Dispatcher() self.assertTrue(pub.delay_task("footask", (), {}, exchange="moo_exchange", routing_key="moo_exchange", event_dispatcher=dispatcher)) + self.assertIn("moo_exchange", amqp._exchanges_declared) self.assertTrue(dispatcher.sent) self.assertEqual(dispatcher.sent[0][0], "task-sent") self.assertTrue(pub.delay_task("footask", (), {}, diff --git a/celery/tests/test_backends/test_base.py b/celery/tests/test_backends/test_base.py --- a/celery/tests/test_backends/test_base.py +++ b/celery/tests/test_backends/test_base.py @@ -7,6 +7,7 @@ from mock import Mock from nose import SkipTest +from celery.result import AsyncResult from celery.utils import serialization from celery.utils.serialization import subclass_exception from celery.utils.serialization import \ @@ -100,7 +101,8 @@ def test_on_chord_apply(self, unlock="celery.chord_unlock"): from celery.registry import tasks p, tasks[unlock] = tasks.get(unlock), Mock() try: - b.on_chord_apply("dakj221", "sdokqweok") + b.on_chord_apply("dakj221", "sdokqweok", + result=map(AsyncResult, [1, 2, 3])) self.assertTrue(tasks[unlock].apply_async.call_count) finally: tasks[unlock] = p diff --git a/celery/tests/test_backends/test_cache.py b/celery/tests/test_backends/test_cache.py --- a/celery/tests/test_backends/test_cache.py +++ b/celery/tests/test_backends/test_cache.py @@ -6,10 +6,14 @@ from contextlib import contextmanager +from mock import Mock, patch + from celery import states from celery.backends.cache import CacheBackend, DummyClient from celery.exceptions import ImproperlyConfigured +from celery.registry import tasks from celery.result import AsyncResult +from celery.task import subtask from celery.utils import uuid from celery.utils.encoding import str_to_bytes @@ -24,60 +28,80 @@ def __init__(self, data): class test_CacheBackend(unittest.TestCase): - def test_mark_as_done(self): - tb = CacheBackend(backend="memory://") 
- - tid = uuid() + def setUp(self): + self.tb = CacheBackend(backend="memory://") + self.tid = uuid() - self.assertEqual(tb.get_status(tid), states.PENDING) - self.assertIsNone(tb.get_result(tid)) + def test_mark_as_done(self): + self.assertEqual(self.tb.get_status(self.tid), states.PENDING) + self.assertIsNone(self.tb.get_result(self.tid)) - tb.mark_as_done(tid, 42) - self.assertEqual(tb.get_status(tid), states.SUCCESS) - self.assertEqual(tb.get_result(tid), 42) + self.tb.mark_as_done(self.tid, 42) + self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS) + self.assertEqual(self.tb.get_result(self.tid), 42) def test_is_pickled(self): - tb = CacheBackend(backend="memory://") - - tid2 = uuid() result = {"foo": "baz", "bar": SomeClass(12345)} - tb.mark_as_done(tid2, result) + self.tb.mark_as_done(self.tid, result) # is serialized properly. - rindb = tb.get_result(tid2) + rindb = self.tb.get_result(self.tid) self.assertEqual(rindb.get("foo"), "baz") self.assertEqual(rindb.get("bar").data, 12345) def test_mark_as_failure(self): - tb = CacheBackend(backend="memory://") - - tid3 = uuid() try: raise KeyError("foo") except KeyError, exception: - pass - tb.mark_as_failure(tid3, exception) - self.assertEqual(tb.get_status(tid3), states.FAILURE) - self.assertIsInstance(tb.get_result(tid3), KeyError) + self.tb.mark_as_failure(self.tid, exception) + self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) + self.assertIsInstance(self.tb.get_result(self.tid), KeyError) - def test_mget(self): + def test_on_chord_apply(self): tb = CacheBackend(backend="memory://") - tb.set("foo", 1) - tb.set("bar", 2) + tb.on_chord_apply("setid", []) + + @patch("celery.result.TaskSetResult") + def test_on_chord_part_return(self, setresult): + tb = CacheBackend(backend="memory://") + + deps = Mock() + deps.total = 2 + setresult.restore.return_value = deps + task = Mock() + task.name = "foobarbaz" + try: + tasks["foobarbaz"] = task + task.request.chord = subtask(task) + 
task.request.taskset = "setid" - self.assertDictEqual(tb.mget(["foo", "bar"]), + tb.on_chord_apply(task.request.taskset, []) + + self.assertFalse(deps.join.called) + tb.on_chord_part_return(task) + self.assertFalse(deps.join.called) + + tb.on_chord_part_return(task) + deps.join.assert_called_with(propagate=False) + deps.delete.assert_called_with() + + finally: + tasks.pop("foobarbaz") + + def test_mget(self): + self.tb.set("foo", 1) + self.tb.set("bar", 2) + + self.assertDictEqual(self.tb.mget(["foo", "bar"]), {"foo": 1, "bar": 2}) def test_forget(self): - tb = CacheBackend(backend="memory://") - tid = uuid() - tb.mark_as_done(tid, {"foo": "bar"}) - x = AsyncResult(tid, backend=tb) + self.tb.mark_as_done(self.tid, {"foo": "bar"}) + x = AsyncResult(self.tid, backend=self.tb) x.forget() self.assertIsNone(x.result) def test_process_cleanup(self): - tb = CacheBackend(backend="memory://") - tb.process_cleanup() + self.tb.process_cleanup() def test_expires_as_int(self): tb = CacheBackend(backend="memory://", expires=10) @@ -129,8 +153,8 @@ def mock_pylibmc(self): class test_get_best_memcache(unittest.TestCase, MockCacheMixin): def test_pylibmc(self): - with reset_modules("celery.backends.cache"): - with self.mock_pylibmc(): + with self.mock_pylibmc(): + with reset_modules("celery.backends.cache"): from celery.backends import cache cache._imp = [None] self.assertEqual(cache.get_best_memcache().__module__, @@ -157,6 +181,7 @@ def test_cached(self): with self.mock_pylibmc(): with reset_modules("celery.backends.cache"): from celery.backends import cache + cache._imp = [None] cache.get_best_memcache(behaviors={"foo": "bar"}) self.assertTrue(cache._imp[0]) cache.get_best_memcache() diff --git a/celery/tests/test_backends/test_mongodb.py b/celery/tests/test_backends/test_mongodb.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_backends/test_mongodb.py @@ -0,0 +1,64 @@ +from __future__ import absolute_import + +import sys + +from nose import SkipTest + +from 
celery.backends.mongodb import MongoBackend +from celery.exceptions import ImproperlyConfigured +from celery.tests.utils import unittest +from celery.utils import uuid + + +_no_mongo_msg = "* MongoDB %s. Will not execute related tests." +_no_mongo_msg_emitted = False + + +try: + from pymongo.errors import AutoReconnect +except ImportError: + + class AutoReconnect(Exception): # noqa + pass + + +def get_mongo_or_SkipTest(): + + def emit_no_mongo_msg(reason): + global _no_mongo_msg_emitted + if not _no_mongo_msg_emitted: + sys.stderr.write("\n" + _no_mongo_msg % reason + "\n") + _no_mongo_msg_emitted = True + + try: + tb = MongoBackend() + try: + tb._get_database() + except AutoReconnect, exc: + emit_no_mongo_msg("not running") + raise SkipTest("Can't connect to MongoDB: %s" % (exc, )) + return tb + except ImproperlyConfigured, exc: + if "need to install" in str(exc): + emit_no_mongo_msg("pymongo not installed") + raise SkipTest("pymongo not installed") + emit_no_mongo_msg("not configured") + raise SkipTest("MongoDB not configured correctly: %s" % (exc, )) + + +class TestMongoBackend(unittest.TestCase): + + def test_save__restore__delete_taskset(self): + tb = get_mongo_or_SkipTest() + + tid = uuid() + res = {u"foo": "bar"} + self.assertEqual(tb.save_taskset(tid, res), res) + + res2 = tb.restore_taskset(tid) + self.assertEqual(res2, res) + + tb.delete_taskset(tid) + self.assertIsNone(tb.restore_taskset(tid)) + + self.assertIsNone(tb.restore_taskset("xxx-nonexisting-id")) diff --git a/celery/tests/test_backends/test_redis_unit.py b/celery/tests/test_backends/test_redis_unit.py --- a/celery/tests/test_backends/test_redis_unit.py +++ b/celery/tests/test_backends/test_redis_unit.py @@ -6,6 +6,7 @@ from celery import current_app from celery import states +from celery.result import AsyncResult from celery.registry import tasks from celery.task import subtask from celery.utils import cached_property, uuid @@ -43,10 +44,18 @@ def expire(self, key, expires): def delete(self, 
key): self.keyspace.pop(key) + def publish(self, key, value): + pass + class redis(object): Redis = Redis + class ConnectionPool(object): + + def __init__(self, **kwargs): + pass + class test_RedisBackend(unittest.TestCase): @@ -93,7 +102,8 @@ def test_expires_is_timedelta(self): self.assertEqual(b.expires, 60) def test_on_chord_apply(self): - self.Backend().on_chord_apply("setid") + self.Backend().on_chord_apply("setid", {}, + result=map(AsyncResult, [1, 2, 3])) def test_mget(self): b = self.MockBackend() diff --git a/celery/tests/test_concurrency/__init__.py b/celery/tests/test_concurrency/__init__.py --- a/celery/tests/test_concurrency/__init__.py +++ b/celery/tests/test_concurrency/__init__.py @@ -1,4 +1,5 @@ from __future__ import absolute_import +from __future__ import with_statement import os @@ -63,3 +64,8 @@ def test_active(self): self.assertFalse(p.active) p._state = p.RUN self.assertTrue(p.active) + + def test_restart(self): + p = BasePool(10) + with self.assertRaises(NotImplementedError): + p.restart() diff --git a/celery/tests/test_concurrency/test_concurrency_processes.py b/celery/tests/test_concurrency/test_concurrency_processes.py --- a/celery/tests/test_concurrency/test_concurrency_processes.py +++ b/celery/tests/test_concurrency/test_concurrency_processes.py @@ -3,6 +3,7 @@ import signal import sys +import time from itertools import cycle @@ -226,3 +227,16 @@ def test_info(self): self.assertEqual(info["max-concurrency"], pool.limit) self.assertIsNone(info["max-tasks-per-child"]) self.assertEqual(info["timeouts"], (5, 10)) + + def test_restart(self): + raise SkipTest("functional test") + def get_pids(pool): + return set([p.pid for p in pool._pool._pool]) + + tp = self.TaskPool(5) + time.sleep(0.5) + tp.start() + pids = get_pids(tp) + tp.restart() + time.sleep(0.5) + self.assertEqual(pids, get_pids(tp)) diff --git a/celery/tests/test_concurrency/test_concurrency_solo.py b/celery/tests/test_concurrency/test_concurrency_solo.py --- 
a/celery/tests/test_concurrency/test_concurrency_solo.py +++ b/celery/tests/test_concurrency/test_concurrency_solo.py @@ -12,7 +12,6 @@ class test_solo_TaskPool(unittest.TestCase): def test_on_start(self): x = solo.TaskPool() x.on_start() - self.assertTrue(x.pid) def test_on_apply(self): x = solo.TaskPool() diff --git a/celery/tests/test_security/__init__.py b/celery/tests/test_security/__init__.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_security/__init__.py @@ -0,0 +1,75 @@ +from __future__ import absolute_import +""" +Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) + +Generated with:: + + $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 + $ openssl req -new -key key1.key -out key1.csr -passin pass:test + $ cp key1.key key1.key.org + $ openssl rsa -in key1.key.org -out key1.key -passin pass:test + $ openssl x509 -req -days 365 -in cert1.csr \ + -signkey key1.key -out cert1.crt + $ rm key1.key.org cert1.csr + +""" + +KEY1 = """-----BEGIN RSA PRIVATE KEY----- +MIICXgIBAAKBgQDCsmLC+eqL4z6bhtv0nzbcnNXuQrZUoh827jGfDI3kxNZ2LbEy +kJOn7GIl2tPpcY2Dm1sOM8G1XLm/8Izprp4ifpF4Gi0mqz0GquY5dcMNASG9zkRO +J1z8dQUyp3PIUHdQdrKbYQVifkA4dh6Kg27k8/IcdY1lHsaIju4bX7MADwIDAQAB +AoGBAKWpCRWjdiluwu+skO0Up6aRIAop42AhzfN8OuZ81SMJRP2rJTHECI8COATD +rDneb63Ce3ibG0BI1Jf3gr624D806xVqK/SVHZNbfWx0daE3Q43DDk1UdhRF5+0X +HPqqU/IdeW1YGyWJi+IhMTXyGqhZ1BTN+4vHL7NlRpDt6JOpAkEA+xvfRO4Ca7Lw +NEgvW7n+/L9b+xygQBtOA5s260pO+8jMrXvOdCjISaKHD8HZGFN9oUmLsDXXBhjh +j0WCMdsHbQJBAMZ9OIw6M/Uxv5ANPCD58p6PZTb0knXVPMYBFQ7Y/h2HZzqbEyiI +DLGZpAa9/IhVkoCULd/TNytz5rl27KEni+sCQArFQEdZmhr6etkTO4zIpoo6vvw/ +VxRI14jKEIn5Dvg3vae3RryuvyCBax+e5evoMNxJJkexl354dLxLc/ElfuUCQQCq +U14pBvD7ITuPM6w7aAEIi2iBZhIgR2GlT9xwJ0i4si6lHdms2EJ8TKlyl6mSnEvh +RkavYSJgiU6eLC0WhUcNAkEA7vuNcz/uuckmq870qfSzUQJIYLzwVOadEdEEAVy0 +L0usztlKmAH8U/ceQMMJLMI9W4m680JrMf3iS7f+SkgUTA== +-----END RSA PRIVATE KEY-----""" + +KEY2 = """-----BEGIN RSA PRIVATE KEY----- 
+MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C +fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 +rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB +AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U +JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn +6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k +Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 +qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE +AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 +yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh +XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz +i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 +Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V +-----END RSA PRIVATE KEY-----""" + +CERT1 = """-----BEGIN CERTIFICATE----- +MIICATCCAWoCCQCR6B3XQcBOvjANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMB4XDTExMDcxOTA5MDgyMloXDTEyMDcxODA5MDgyMlowRTELMAkG +A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 +IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwrJi +wvnqi+M+m4bb9J823JzV7kK2VKIfNu4xnwyN5MTWdi2xMpCTp+xiJdrT6XGNg5tb +DjPBtVy5v/CM6a6eIn6ReBotJqs9BqrmOXXDDQEhvc5ETidc/HUFMqdzyFB3UHay +m2EFYn5AOHYeioNu5PPyHHWNZR7GiI7uG1+zAA8CAwEAATANBgkqhkiG9w0BAQUF +AAOBgQA4+OiJ+pyq9lbEMFYC9K2+e77noHJkwUOs4wO6p1R14ZqSmoIszQ7KEBiH +2HHPMUY6kt4GL1aX4Vr1pUlXXdH5WaEk0fvDYZemILDMqIQJ9ettx8KihZjFGC4k +Y4Sy5xmqdE9Kjjd854gTRRnzpMnJp6+74Ki2X8GHxn3YBM+9Ng== +-----END CERTIFICATE-----""" + +CERT2 = """-----BEGIN CERTIFICATE----- +MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG +A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 
+IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti +/G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ +EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm +IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF +AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP +e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS +WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== +-----END CERTIFICATE-----""" diff --git a/celery/tests/test_security/case.py b/celery/tests/test_security/case.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_security/case.py @@ -0,0 +1,14 @@ +from __future__ import absolute_import + +from nose import SkipTest + +from celery.tests.utils import unittest + + +class SecurityCase(unittest.TestCase): + + def setUp(self): + try: + from OpenSSL import crypto # noqa + except ImportError: + raise SkipTest("OpenSSL.crypto not installed") diff --git a/celery/tests/test_security/test_certificate.py b/celery/tests/test_security/test_certificate.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_security/test_certificate.py @@ -0,0 +1,41 @@ +from __future__ import absolute_import + +from celery.exceptions import SecurityError +from celery.security.certificate import Certificate, CertStore + +from . 
import CERT1, CERT2, KEY1 +from .case import SecurityCase + + +class TestCertificate(SecurityCase): + + def test_valid_certificate(self): + Certificate(CERT1) + Certificate(CERT2) + + def test_invalid_certificate(self): + self.assertRaises(TypeError, Certificate, None) + self.assertRaises(SecurityError, Certificate, "") + self.assertRaises(SecurityError, Certificate, "foo") + self.assertRaises(SecurityError, Certificate, CERT1[:20] + CERT1[21:]) + self.assertRaises(SecurityError, Certificate, KEY1) + + +class TestCertStore(SecurityCase): + + def test_itercerts(self): + cert1 = Certificate(CERT1) + cert2 = Certificate(CERT2) + certstore = CertStore() + for c in certstore.itercerts(): + self.assertTrue(False) + certstore.add_cert(cert1) + certstore.add_cert(cert2) + for c in certstore.itercerts(): + self.assertIn(c, (cert1, cert2)) + + def test_duplicate(self): + cert1 = Certificate(CERT1) + certstore = CertStore() + certstore.add_cert(cert1) + self.assertRaises(SecurityError, certstore.add_cert, cert1) diff --git a/celery/tests/test_security/test_key.py b/celery/tests/test_security/test_key.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_security/test_key.py @@ -0,0 +1,21 @@ +from __future__ import absolute_import + +from celery.exceptions import SecurityError +from celery.security.key import PrivateKey + +from . 
import CERT1, KEY1, KEY2 +from .case import SecurityCase + + +class TestKey(SecurityCase): + + def test_valid_private_key(self): + PrivateKey(KEY1) + PrivateKey(KEY2) + + def test_invalid_private_key(self): + self.assertRaises(TypeError, PrivateKey, None) + self.assertRaises(SecurityError, PrivateKey, "") + self.assertRaises(SecurityError, PrivateKey, "foo") + self.assertRaises(SecurityError, PrivateKey, KEY1[:20] + KEY1[21:]) + self.assertRaises(SecurityError, PrivateKey, CERT1) diff --git a/celery/tests/test_security/test_serialization.py b/celery/tests/test_security/test_serialization.py new file mode 100644 --- /dev/null +++ b/celery/tests/test_security/test_serialization.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import + +from celery.exceptions import SecurityError + +from celery.security.serialization import SecureSerializer +from celery.security.certificate import Certificate, CertStore +from celery.security.key import PrivateKey + +from . import CERT1, CERT2, KEY1, KEY2 +from .case import SecurityCase + + +class TestSecureSerializer(SecurityCase): + + def _get_s(self, key, cert, certs): + store = CertStore() + for c in certs: + store.add_cert(Certificate(c)) + return SecureSerializer(PrivateKey(key), Certificate(cert), store) + + def test_serialize(self): + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s.deserialize(s.serialize("foo")), "foo") + + def test_deserialize(self): + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertRaises(SecurityError, s.deserialize, "bad data") + + def test_unmatched_key_cert(self): + s = self._get_s(KEY1, CERT2, [CERT1, CERT2]) + self.assertRaises(SecurityError, + s.deserialize, s.serialize("foo")) + + def test_unknown_source(self): + s1 = self._get_s(KEY1, CERT1, [CERT2]) + s2 = self._get_s(KEY1, CERT1, []) + self.assertRaises(SecurityError, + s1.deserialize, s1.serialize("foo")) + self.assertRaises(SecurityError, + s2.deserialize, s2.serialize("foo")) + + def test_self_send(self): + s1 = 
self._get_s(KEY1, CERT1, [CERT1]) + s2 = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s2.deserialize(s1.serialize("foo")), "foo") + + def test_separate_ends(self): + s1 = self._get_s(KEY1, CERT1, [CERT2]) + s2 = self._get_s(KEY2, CERT2, [CERT1]) + self.assertEqual(s2.deserialize(s1.serialize("foo")), "foo") diff --git a/celery/tests/test_task/__init__.py b/celery/tests/test_task/__init__.py --- a/celery/tests/test_task/__init__.py +++ b/celery/tests/test_task/__init__.py @@ -4,8 +4,6 @@ from datetime import datetime, timedelta from functools import wraps -from mock import Mock - from celery import task from celery.app import app_or_default from celery.task import task as task_dec @@ -306,8 +304,8 @@ def test_regular_task(self): # With eta. presult2 = t1.apply_async(kwargs=dict(name="George Costanza"), - eta=datetime.now() + timedelta(days=1), - expires=datetime.now() + timedelta(days=2)) + eta=datetime.utcnow() + timedelta(days=1), + expires=datetime.utcnow() + timedelta(days=2)) self.assertNextTaskDataEqual(consumer, presult2, t1.name, name="George Costanza", test_eta=True, test_expires=True) @@ -343,10 +341,8 @@ def test_task_class_repr(self): def test_after_return(self): task = self.createTaskCls("T1", "c.unittest.t.after_return")() - task.backend = Mock() task.request.chord = return_True_task.subtask() task.after_return("SUCCESS", 1.0, "foobar", (), {}, None) - task.backend.on_chord_part_return.assert_called_with(task) task.request.clear() def test_send_task_sent_event(self): @@ -522,11 +518,11 @@ def test_must_have_run_every(self): def test_remaining_estimate(self): self.assertIsInstance( - MyPeriodic().remaining_estimate(datetime.now()), + MyPeriodic().remaining_estimate(datetime.utcnow()), timedelta) def test_is_due_not_due(self): - due, remaining = MyPeriodic().is_due(datetime.now()) + due, remaining = MyPeriodic().is_due(datetime.utcnow()) self.assertFalse(due) # This assertion may fail if executed in the # first minute of an hour, thus 59 instead 
of 60 @@ -534,7 +530,7 @@ def test_is_due_not_due(self): def test_is_due(self): p = MyPeriodic() - due, remaining = p.is_due(datetime.now() - p.run_every.run_every) + due, remaining = p.is_due(datetime.utcnow() - p.run_every.run_every) self.assertTrue(due) self.assertEqual(remaining, p.timedelta_seconds(p.run_every.run_every)) @@ -710,7 +706,7 @@ def test_not_weekday(self): class test_crontab_is_due(unittest.TestCase): def setUp(self): - self.now = datetime.now() + self.now = datetime.utcnow() self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond def test_default_crontab_spec(self): diff --git a/celery/tests/test_task/test_chord.py b/celery/tests/test_task/test_chord.py --- a/celery/tests/test_task/test_chord.py +++ b/celery/tests/test_task/test_chord.py @@ -3,6 +3,7 @@ from mock import patch from celery import current_app +from celery.result import AsyncResult from celery.task import chords from celery.task import TaskSet from celery.tests.utils import AppCase, Mock @@ -15,36 +16,62 @@ def add(x, y): return x + y +@current_app.task +def callback(r): + return r + + +class TSR(chords.TaskSetResult): + is_ready = True + value = [2, 4, 8, 6] + + def ready(self): + return self.is_ready + + def join(self, **kwargs): + return self.value + + def join_native(self, **kwargs): + return self.value + + class test_unlock_chord_task(AppCase): - @patch("celery.task.chords.TaskSetResult") @patch("celery.task.chords._unlock_chord.retry") - def test_unlock_ready(self, retry, TaskSetResult): - callback = Mock() - result = Mock(attrs=dict(ready=lambda: True, - join=lambda **kw: [2, 4, 8, 6])) - TaskSetResult.restore = lambda setid: result + def test_unlock_ready(self, retry): + callback.apply_async = Mock() + + pts, chords.TaskSetResult = chords.TaskSetResult, TSR subtask, chords.subtask = chords.subtask, passthru try: - chords._unlock_chord("setid", callback) + chords._unlock_chord("setid", callback.subtask(), + result=map(AsyncResult, [1, 2, 3])) finally: 
chords.subtask = subtask - callback.delay.assert_called_with([2, 4, 8, 6]) - result.delete.assert_called_with() + chords.TaskSetResult = pts + callback.apply_async.assert_called_with(([2, 4, 8, 6], ), {}) # did not retry self.assertFalse(retry.call_count) @patch("celery.task.chords.TaskSetResult") @patch("celery.task.chords._unlock_chord.retry") def test_when_not_ready(self, retry, TaskSetResult): - callback = Mock() - result = Mock(attrs=dict(ready=lambda: False)) - TaskSetResult.restore = lambda setid: result - chords._unlock_chord("setid", callback, interval=10, max_retries=30) - self.assertFalse(callback.delay.call_count) - # did retry - chords._unlock_chord.retry.assert_called_with(countdown=10, - max_retries=30) + callback.apply_async = Mock() + + class NeverReady(TSR): + is_ready = False + + pts, chords.TaskSetResult = chords.TaskSetResult, NeverReady + try: + chords._unlock_chord("setid", callback.subtask, interval=10, + max_retries=30, + result=map(AsyncResult, [1, 2, 3])) + self.assertFalse(callback.apply_async.call_count) + # did retry + chords._unlock_chord.retry.assert_called_with(countdown=10, + max_retries=30) + finally: + chords.TaskSetResult = pts def test_is_in_registry(self): from celery.registry import tasks diff --git a/celery/tests/test_task/test_execute_trace.py b/celery/tests/test_task/test_execute_trace.py --- a/celery/tests/test_task/test_execute_trace.py +++ b/celery/tests/test_task/test_execute_trace.py @@ -1,26 +1,34 @@ from __future__ import absolute_import from __future__ import with_statement -import operator - +from celery import current_app from celery import states from celery.exceptions import RetryTaskError -from celery.execute.trace import TraceInfo +from celery.execute.trace import eager_trace_task from celery.tests.utils import unittest -trace = TraceInfo.trace + +@current_app.task +def add(x, y): + return x + y +@current_app.task def raises(exc): raise exc -class test_TraceInfo(unittest.TestCase): +def trace(task, args=(), 
kwargs={}, propagate=False): + return eager_trace_task(task, "id-1", args, kwargs, + propagate=propagate) + + +class test_trace(unittest.TestCase): def test_trace_successful(self): - info = trace(operator.add, (2, 2), {}) - self.assertEqual(info.status, states.SUCCESS) - self.assertEqual(info.retval, 4) + retval, info = trace(add, (2, 2), {}) + self.assertIsNone(info) + self.assertEqual(retval, 4) def test_trace_SystemExit(self): with self.assertRaises(SystemExit): @@ -28,14 +36,14 @@ def test_trace_SystemExit(self): def test_trace_RetryTaskError(self): exc = RetryTaskError("foo", "bar") - info = trace(raises, (exc, ), {}) - self.assertEqual(info.status, states.RETRY) + _, info = trace(raises, (exc, ), {}) + self.assertEqual(info.state, states.RETRY) self.assertIs(info.retval, exc) def test_trace_exception(self): exc = KeyError("foo") - info = trace(raises, (exc, ), {}) - self.assertEqual(info.status, states.FAILURE) + _, info = trace(raises, (exc, ), {}) + self.assertEqual(info.state, states.FAILURE) self.assertIs(info.retval, exc) def test_trace_exception_propagate(self): diff --git a/celery/tests/test_utils/__init__.py b/celery/tests/test_utils/__init__.py --- a/celery/tests/test_utils/__init__.py +++ b/celery/tests/test_utils/__init__.py @@ -33,9 +33,10 @@ def test_chunks(self): class test_utils(unittest.TestCase): - def test_get_full_cls_name(self): + def test_qualname(self): Class = type("Fox", (object, ), {"__module__": "quick.brown"}) - self.assertEqual(utils.get_full_cls_name(Class), "quick.brown.Fox") + self.assertEqual(utils.qualname(Class), "quick.brown.Fox") + self.assertEqual(utils.qualname(Class()), "quick.brown.Fox") def test_is_iterable(self): for a in "f", ["f"], ("f", ), {"f": "f"}: diff --git a/celery/tests/test_worker/__init__.py b/celery/tests/test_worker/__init__.py --- a/celery/tests/test_worker/__init__.py +++ b/celery/tests/test_worker/__init__.py @@ -297,6 +297,7 @@ def test_receive_message_eta_OverflowError(self, to_timestamp): 
eta=datetime.now().isoformat()) l.event_dispatcher = Mock() l.pidbox_node = MockNode() + l.update_strategies() l.receive_message(m.decode(), m) self.assertTrue(m.acknowledged) @@ -308,6 +309,7 @@ def test_receive_message_InvalidTaskError(self): send_events=False) m = create_message(Mock(), task=foo_task.name, args=(1, 2), kwargs="foobarbaz", id=1) + l.update_strategies() l.event_dispatcher = Mock() l.pidbox_node = MockNode() @@ -336,6 +338,7 @@ def test_receieve_message(self): send_events=False) m = create_message(Mock(), task=foo_task.name, args=[2, 4, 8], kwargs={}) + l.update_strategies() l.event_dispatcher = Mock() l.receive_message(m.decode(), m) @@ -463,6 +466,7 @@ def test_receieve_message_eta_isoformat(self): l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger) l.event_dispatcher = Mock() l.enabled = False + l.update_strategies() l.receive_message(m.decode(), m) l.eta_schedule.stop() diff --git a/celery/tests/test_worker/test_worker_autoscale.py b/celery/tests/test_worker/test_worker_autoscale.py --- a/celery/tests/test_worker/test_worker_autoscale.py +++ b/celery/tests/test_worker/test_worker_autoscale.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import logging +import sys from time import time @@ -53,7 +54,7 @@ class Scaler(autoscale.Autoscaler): alive = True joined = False - def isAlive(self): + def is_alive(self): return self.alive def join(self, timeout=None): @@ -90,7 +91,7 @@ def test_run(self): class Scaler(autoscale.Autoscaler): scale_called = False - def scale(self): + def body(self): self.scale_called = True self._is_shutdown.set() @@ -140,12 +141,16 @@ def test_thread_crash(self, _exit): class _Autoscaler(autoscale.Autoscaler): - def scale(self): + def body(self): self._is_shutdown.set() raise OSError("foo") - x = _Autoscaler(self.pool, 10, 3, logger=logger) - x.logger = Mock() - x.run() + + stderr = Mock() + p, sys.stderr = sys.stderr, stderr + try: + x.run() + finally: + sys.stderr = p _exit.assert_called_with(1) - 
self.assertTrue(x.logger.error.call_count) + self.assertTrue(stderr.write.call_count) diff --git a/celery/tests/test_worker/test_worker_control.py b/celery/tests/test_worker/test_worker_control.py --- a/celery/tests/test_worker/test_worker_control.py +++ b/celery/tests/test_worker/test_worker_control.py @@ -1,12 +1,13 @@ from __future__ import absolute_import from __future__ import with_statement +import sys import socket from datetime import datetime, timedelta from kombu import pidbox -from mock import Mock +from mock import Mock, patch from celery import current_app from celery.datastructures import AttributeDict @@ -19,8 +20,8 @@ from celery.worker.job import TaskRequest from celery.worker import state from celery.worker.state import revoked -from celery.worker.control import builtins -from celery.worker.control.registry import Panel +from celery.worker import control +from celery.worker.control import Panel from celery.tests.utils import unittest hostname = socket.gethostname() @@ -39,8 +40,8 @@ class Consumer(object): def __init__(self): self.ready_queue = FastQueue() - self.ready_queue.put(TaskRequest(task_name=mytask.name, - task_id=uuid(), + self.ready_queue.put(TaskRequest(mytask.name, + uuid(), args=(2, 2), kwargs={})) self.eta_schedule = Timer() @@ -337,12 +338,12 @@ def test_revoke_terminate(self): request.task_id = tid = uuid() state.active_requests.add(request) try: - r = builtins.revoke(Mock(), tid, terminate=True) + r = control.revoke(Mock(), tid, terminate=True) self.assertIn(tid, revoked) self.assertTrue(request.terminate.call_count) self.assertIn("terminated", r["ok"]) # unknown task id only revokes - r = builtins.revoke(Mock(), uuid(), terminate=True) + r = control.revoke(Mock(), uuid(), terminate=True) self.assertIn("revoked", r["ok"]) finally: state.active_requests.discard(request) @@ -376,3 +377,61 @@ def reply(self, data, exchange, routing_key, **kwargs): "routing_key": "x"}) self.assertEqual(r, "pong") self.assertDictEqual(replies[0], 
{panel.hostname: "pong"}) + + def test_pool_restart(self): + consumer = Consumer() + consumer.pool.restart = Mock() + panel = self.create_panel(consumer=consumer) + panel.app = self.app + _import = panel.app.loader.import_from_cwd = Mock() + _reload = Mock() + + panel.handle("pool_restart", {"reload": _reload}) + self.assertTrue(consumer.pool.restart.called) + self.assertFalse(_reload.called) + self.assertFalse(_import.called) + + def test_pool_restart_import_modules(self): + consumer = Consumer() + consumer.pool.restart = Mock() + panel = self.create_panel(consumer=consumer) + panel.app = self.app + _import = panel.app.loader.import_from_cwd = Mock() + _reload = Mock() + + panel.handle("pool_restart", {"imports": ["foo", "bar"], + "reload": _reload}) + + self.assertTrue(consumer.pool.restart.called) + self.assertFalse(_reload.called) + self.assertEqual([(("foo",), {}), (("bar",), {})], + _import.call_args_list) + + def test_pool_restart_relaod_modules(self): + consumer = Consumer() + consumer.pool.restart = Mock() + panel = self.create_panel(consumer=consumer) + panel.app = self.app + _import = panel.app.loader.import_from_cwd = Mock() + _reload = Mock() + + with patch.dict(sys.modules, {"foo": None}): + panel.handle("pool_restart", {"imports": ["foo"], + "reload_imports": False, + "reload": _reload}) + + self.assertTrue(consumer.pool.restart.called) + self.assertFalse(_reload.called) + self.assertFalse(_import.called) + + _import.reset_mock() + _reload.reset_mock() + consumer.pool.restart.reset_mock() + + panel.handle("pool_restart", {"imports": ["foo"], + "reload_imports": True, + "reload": _reload}) + + self.assertTrue(consumer.pool.restart.called) + self.assertTrue(_reload.called) + self.assertFalse(_import.called) diff --git a/celery/tests/test_worker/test_worker_heartbeat.py b/celery/tests/test_worker/test_worker_heartbeat.py --- a/celery/tests/test_worker/test_worker_heartbeat.py +++ b/celery/tests/test_worker/test_worker_heartbeat.py @@ -10,6 +10,9 @@ 
class MockDispatcher(object): def __init__(self): self.sent = [] + self.on_enabled = set() + self.on_disabled = set() + self.enabled = True def send(self, msg, **_fields): self.sent.append(msg) diff --git a/celery/tests/test_worker/test_worker_job.py b/celery/tests/test_worker/test_worker_job.py --- a/celery/tests/test_worker/test_worker_job.py +++ b/celery/tests/test_worker/test_worker_job.py @@ -7,6 +7,7 @@ import os import sys import time +import warnings from datetime import datetime, timedelta @@ -18,16 +19,17 @@ from celery.app import app_or_default from celery.concurrency.base import BasePool from celery.datastructures import ExceptionInfo -from celery.task import task as task_dec -from celery.exceptions import RetryTaskError, NotRegistered, WorkerLostError +from celery.exceptions import (RetryTaskError, + WorkerLostError, InvalidTaskError) +from celery.execute.trace import eager_trace_task, TraceInfo from celery.log import setup_logger +from celery.registry import tasks from celery.result import AsyncResult +from celery.task import task as task_dec from celery.task.base import Task from celery.utils import uuid -from celery.utils.encoding import from_utf8 -from celery.worker.job import (WorkerTaskTrace, TaskRequest, - InvalidTaskError, execute_and_trace, - default_encode) +from celery.utils.encoding import from_utf8, default_encode +from celery.worker.job import TaskRequest, execute_and_trace from celery.worker.state import revoked from celery.tests.compat import catch_warnings @@ -39,11 +41,11 @@ some_kwargs_scratchpad = {} -def jail(task_id, task_name, args, kwargs): - return WorkerTaskTrace(task_name, task_id, args, kwargs)() +def jail(task_id, name, args, kwargs): + return eager_trace_task(tasks[name], task_id, args, kwargs, eager=False)[0] -def on_ack(): +def on_ack(*args, **kwargs): scratch["ACK"] = True @@ -109,7 +111,7 @@ def test_retry_task_error(self): self.assertEqual(ret.exc, exc) -class test_WorkerTaskTrace(unittest.TestCase): +class 
test_trace_task(unittest.TestCase): def test_process_cleanup_fails(self): backend = mytask.backend @@ -122,7 +124,8 @@ def test_process_cleanup_fails(self): tid = uuid() ret = jail(tid, mytask.name, [2], {}) self.assertEqual(ret, 4) - mytask.backend.mark_as_done.assert_called_with(tid, 4) + mytask.backend.store_result.assert_called_with(tid, 4, + states.SUCCESS) logs = sio.getvalue().strip() self.assertIn("Process cleanup failed", logs) finally: @@ -143,15 +146,16 @@ def test_execute_jail_success(self): self.assertEqual(ret, 4) def test_marked_as_started(self): - mytask.track_started = True class Backend(mytask.backend.__class__): _started = [] - def mark_as_started(self, tid, *args, **kwargs): - self._started.append(tid) + def store_result(self, tid, meta, state): + if state == states.STARTED: + self._started.append(tid) prev, mytask.backend = mytask.backend, Backend() + mytask.track_started = True try: tid = uuid() @@ -168,17 +172,26 @@ def mark_as_started(self, tid, *args, **kwargs): mytask.ignore_result = False def test_execute_jail_failure(self): - ret = jail(uuid(), mytask_raising.name, - [4], {}) - self.assertIsInstance(ret, ExceptionInfo) - self.assertTupleEqual(ret.exception.args, (4, )) + u = uuid() + mytask_raising.request.update({"id": u}) + try: + ret = jail(u, mytask_raising.name, + [4], {}) + self.assertIsInstance(ret, ExceptionInfo) + self.assertTupleEqual(ret.exception.args, (4, )) + finally: + mytask_raising.request.clear() def test_execute_ignore_result(self): task_id = uuid() - ret = jail(id, MyTaskIgnoreResult.name, - [4], {}) - self.assertEqual(ret, 256) - self.assertFalse(AsyncResult(task_id).ready()) + MyTaskIgnoreResult.request.update({"id": task_id}) + try: + ret = jail(task_id, MyTaskIgnoreResult.name, + [4], {}) + self.assertEqual(ret, 256) + self.assertFalse(AsyncResult(task_id).ready()) + finally: + MyTaskIgnoreResult.request.clear() class MockEventDispatcher(object): @@ -200,10 +213,10 @@ def test_sets_store_errors(self): 
mytask.ignore_result = True try: tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) - self.assertFalse(tw._store_errors) + self.assertFalse(tw.store_errors) mytask.store_errors_even_if_ignored = True tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) - self.assertTrue(tw._store_errors) + self.assertTrue(tw.store_errors) finally: mytask.ignore_result = False mytask.store_errors_even_if_ignored = False @@ -242,16 +255,16 @@ def test_terminate__task_reserved(self): tw.terminate(pool, signal="KILL") def test_revoked_expires_expired(self): - tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) - tw.expires = datetime.now() - timedelta(days=1) + tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}, + expires=datetime.utcnow() - timedelta(days=1)) tw.revoked() self.assertIn(tw.task_id, revoked) self.assertEqual(mytask.backend.get_status(tw.task_id), states.REVOKED) def test_revoked_expires_not_expired(self): - tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) - tw.expires = datetime.now() + timedelta(days=1) + tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}, + expires=datetime.utcnow() + timedelta(days=1)) tw.revoked() self.assertNotIn(tw.task_id, revoked) self.assertNotEqual(mytask.backend.get_status(tw.task_id), @@ -259,9 +272,9 @@ def test_revoked_expires_not_expired(self): def test_revoked_expires_ignore_result(self): mytask.ignore_result = True - tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) + tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}, + expires=datetime.utcnow() - timedelta(days=1)) try: - tw.expires = datetime.now() - timedelta(days=1) tw.revoked() self.assertIn(tw.task_id, revoked) self.assertNotEqual(mytask.backend.get_status(tw.task_id), @@ -449,32 +462,33 @@ def error(self, msg, *args, **kwargs): tw = TaskRequest(mytask.name, uuid(), [1], {"f": "x"}) tw.logger = MockLogger() finally: - mytask.ignore_result = False tw.on_timeout(soft=True, timeout=1336) self.assertEqual(mytask.backend.get_status(tw.task_id), 
states.PENDING) + mytask.ignore_result = False def test_execute_and_trace(self): res = execute_and_trace(mytask.name, uuid(), [4], {}) self.assertEqual(res, 4 ** 4) def test_execute_safe_catches_exception(self): - old_exec = WorkerTaskTrace.execute + warnings.resetwarnings() def _error_exec(self, *args, **kwargs): raise KeyError("baz") - WorkerTaskTrace.execute = _error_exec - try: - with catch_warnings(record=True) as log: - res = execute_and_trace(mytask.name, uuid(), - [4], {}) - self.assertIsInstance(res, ExceptionInfo) - self.assertTrue(log) - self.assertIn("Exception outside", log[0].message.args[0]) - self.assertIn("KeyError", log[0].message.args[0]) - finally: - WorkerTaskTrace.execute = old_exec + @task_dec + def raising(): + raise KeyError("baz") + raising.request = None + + with catch_warnings(record=True) as log: + res = execute_and_trace(raising.name, uuid(), + [], {}) + self.assertIsInstance(res, ExceptionInfo) + self.assertTrue(log) + self.assertIn("Exception outside", log[0].message.args[0]) + self.assertIn("AttributeError", log[0].message.args[0]) def create_exception(self, exc): try: @@ -485,27 +499,31 @@ def create_exception(self, exc): def test_worker_task_trace_handle_retry(self): from celery.exceptions import RetryTaskError tid = uuid() - w = WorkerTaskTrace(mytask.name, tid, [4], {}) - type_, value_, tb_ = self.create_exception(ValueError("foo")) - type_, value_, tb_ = self.create_exception(RetryTaskError(str(value_), - exc=value_)) - w._store_errors = False - w.handle_retry(value_, type_, tb_, "") - self.assertEqual(mytask.backend.get_status(tid), states.PENDING) - w._store_errors = True - w.handle_retry(value_, type_, tb_, "") - self.assertEqual(mytask.backend.get_status(tid), states.RETRY) + mytask.request.update({"id": tid}) + try: + _, value_, _ = self.create_exception(ValueError("foo")) + einfo = self.create_exception(RetryTaskError(str(value_), + exc=value_)) + w = TraceInfo(states.RETRY, einfo[1], einfo) + w.handle_retry(mytask, 
store_errors=False) + self.assertEqual(mytask.backend.get_status(tid), states.PENDING) + w.handle_retry(mytask, store_errors=True) + self.assertEqual(mytask.backend.get_status(tid), states.RETRY) + finally: + mytask.request.clear() def test_worker_task_trace_handle_failure(self): tid = uuid() - w = WorkerTaskTrace(mytask.name, tid, [4], {}) - type_, value_, tb_ = self.create_exception(ValueError("foo")) - w._store_errors = False - w.handle_failure(value_, type_, tb_, "") - self.assertEqual(mytask.backend.get_status(tid), states.PENDING) - w._store_errors = True - w.handle_failure(value_, type_, tb_, "") - self.assertEqual(mytask.backend.get_status(tid), states.FAILURE) + mytask.request.update({"id": tid}) + try: + einfo = self.create_exception(ValueError("foo")) + w = TraceInfo(states.FAILURE, einfo[1], einfo) + w.handle_failure(mytask, store_errors=False) + self.assertEqual(mytask.backend.get_status(tid), states.PENDING) + w.handle_failure(mytask, store_errors=True) + self.assertEqual(mytask.backend.get_status(tid), states.FAILURE) + finally: + mytask.request.clear() def test_task_wrapper_mail_attrs(self): tw = TaskRequest(mytask.name, uuid(), [], {}) @@ -533,8 +551,9 @@ def test_from_message(self): self.assertEqual(tw.task_id, body["id"]) self.assertEqual(tw.args, body["args"]) us = from_utf8(us) - self.assertEqual(tw.kwargs.keys()[0], us) - self.assertIsInstance(tw.kwargs.keys()[0], str) + if sys.version_info < (2, 6): + self.assertEqual(tw.kwargs.keys()[0], us) + self.assertIsInstance(tw.kwargs.keys()[0], str) self.assertTrue(tw.logger) def test_from_message_empty_args(self): @@ -561,7 +580,7 @@ def test_from_message_nonexistant_task(self): m = Message(None, body=anyjson.serialize(body), backend="foo", content_type="application/json", content_encoding="utf-8") - with self.assertRaises(NotRegistered): + with self.assertRaises(KeyError): TaskRequest.from_message(m, m.decode()) def test_execute(self): diff --git a/celery/tests/test_worker/test_worker_mediator.py 
b/celery/tests/test_worker/test_worker_mediator.py --- a/celery/tests/test_worker/test_worker_mediator.py +++ b/celery/tests/test_worker/test_worker_mediator.py @@ -1,5 +1,7 @@ from __future__ import absolute_import +import sys + from Queue import Queue from mock import Mock, patch @@ -40,7 +42,7 @@ def test_mediator_start__stop(self): self.assertTrue(m._is_shutdown.isSet()) self.assertTrue(m._is_stopped.isSet()) - def test_mediator_move(self): + def test_mediator_body(self): ready_queue = Queue() got = {} @@ -50,7 +52,7 @@ def mycallback(value): m = Mediator(ready_queue, mycallback) ready_queue.put(MockTask("George Costanza")) - m.move() + m.body() self.assertEqual(got["value"], "George Costanza") @@ -60,7 +62,7 @@ def test_mediator_crash(self, _exit): class _Mediator(Mediator): - def move(self): + def body(self): try: raise KeyError("foo") finally: @@ -69,11 +71,17 @@ def move(self): ready_queue = Queue() ms[0] = m = _Mediator(ready_queue, None) ready_queue.put(MockTask("George Constanza")) - m.run() + stderr = Mock() + p, sys.stderr = sys.stderr, stderr + try: + m.run() + finally: + sys.stderr = p self.assertTrue(_exit.call_count) + self.assertTrue(stderr.write.call_count) - def test_mediator_move_exception(self): + def test_mediator_body_exception(self): ready_queue = Queue() def mycallback(value): @@ -82,7 +90,7 @@ def mycallback(value): m = Mediator(ready_queue, mycallback) ready_queue.put(MockTask("Elaine M. 
Benes")) - m.move() + m.body() def test_run(self): ready_queue = Queue() @@ -100,7 +108,7 @@ def mycallback(value): self.assertTrue(m._is_shutdown.isSet()) self.assertTrue(m._is_stopped.isSet()) - def test_mediator_move_revoked(self): + def test_mediator_body_revoked(self): ready_queue = Queue() got = {} @@ -113,7 +121,7 @@ def mycallback(value): revoked_tasks.add(t.task_id) ready_queue.put(t) - m.move() + m.body() self.assertNotIn("value", got) self.assertTrue(t.on_ack.call_count) diff --git a/celery/tests/utils.py b/celery/tests/utils.py --- a/celery/tests/utils.py +++ b/celery/tests/utils.py @@ -254,7 +254,7 @@ def myimp(name, *args, **kwargs): @contextmanager def override_stdouts(): - """Override `sys.stdout` and `sys.stderr` with `StringIO`.""" + """Override `sys.stdout` and `sys.stderr` with `WhateverIO`.""" prev_out, prev_err = sys.stdout, sys.stderr mystdout, mystderr = WhateverIO(), WhateverIO() sys.stdout = sys.__stdout__ = mystdout diff --git a/requirements/test.txt b/requirements/test.txt --- a/requirements/test.txt +++ b/requirements/test.txt @@ -7,3 +7,4 @@ mock>=0.7.0 redis pymongo SQLAlchemy +PyOpenSSL
1.0
NVIDIA__NeMo-1323
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Loading NLP and ASR models might result in `Missing key(s) in state_dict` error **Describe the bug** After #1278 is merged, any model that uses Metrics (WERBPE, WER, TopKClassificationAccuracy, ClassificationReport, Perplexity) might throw an error like: ``` RuntimeError: Error(s) in loading state_dict for PunctuationCapitalizationModel: Missing key(s) in state_dict: "punct_class_report.tp", "punct_class_report.fn", "punct_class_report.fp", "capit_class_report.tp", "capit_class_report.fn", "capit_class_report.fp". ``` **Steps/Code to reproduce bug** N/A **Expected behavior** N/A **Environment overview (please complete the following information)** N/A **Environment details** N/A **Additional context** The current work-around is to pass strict=False to either NeMo's Model.from_pretrained(..., strict=False) NeMo's ModelPT.load_from_checkpoint(..., strict=False) Lightnings's LightningModule.load_from_checkpoint(..., strict=False) Torch's Module.load_state_dict(..., strict=False) </issue> <code> [start of README.rst] 1 2 |status| |license| |lgtm_grade| |lgtm_alerts| |black| 3 4 .. |status| image:: http://www.repostatus.org/badges/latest/active.svg 5 :target: http://www.repostatus.org/#active 6 :alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed. 7 8 9 .. |license| image:: https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg 10 :target: https://github.com/NVIDIA/NeMo/blob/master/LICENSE 11 :alt: NeMo core license and license for collections in this repo 12 13 .. |lgtm_grade| image:: https://img.shields.io/lgtm/grade/python/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 14 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/context:python 15 :alt: Language grade: Python 16 17 .. 
|lgtm_alerts| image:: https://img.shields.io/lgtm/alerts/g/NVIDIA/NeMo.svg?logo=lgtm&logoWidth=18 18 :target: https://lgtm.com/projects/g/NVIDIA/NeMo/alerts/ 19 :alt: Total alerts 20 21 .. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg 22 :target: https://github.com/psf/black 23 :alt: Code style: black 24 25 **NVIDIA NeMo** 26 =============== 27 28 Introduction 29 ------------ 30 31 NeMo is a toolkit for creating `Conversational AI <https://developer.nvidia.com/conversational-ai#started>`_ applications. 32 33 NeMo toolkit makes it possible for researchers to easily compose complex neural network architectures for conversational AI using reusable components - Neural Modules. 34 **Neural Modules** are conceptual blocks of neural networks that take *typed* inputs and produce *typed* outputs. Such modules typically represent data layers, encoders, decoders, language models, loss functions, or methods of combining activations. 35 36 37 The toolkit comes with extendable collections of pre-built modules and ready-to-use models for: 38 39 * `Automatic Speech Recognition (ASR) <https://ngc.nvidia.com/catalog/models/nvidia:nemospeechmodels>`_ 40 * `Natural Language Processing (NLP) <https://ngc.nvidia.com/catalog/models/nvidia:nemonlpmodels>`_ 41 * `Speech synthesis, or Text-To-Speech (TTS) <https://ngc.nvidia.com/catalog/models/nvidia:nemottsmodels>`_ 42 43 Built for speed, NeMo can utilize NVIDIA's Tensor Cores and scale out training to multiple GPUs and multiple nodes. 44 45 `NeMo product page. <https://developer.nvidia.com/nvidia-nemo>`_ 46 47 `Introductory video. <https://www.youtube.com/embed/wBgpMf_KQVw>`_ 48 49 .. 
raw:: html 
 50 
 51     <div style="position: relative; padding-bottom: 3%; height: 0; overflow: hidden; max-width: 100%; height: auto;"> 
 52         <iframe width="560" height="315" src="https://www.youtube.com/embed/wBgpMf_KQVw" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> 
 53     </div> 
 54 
 55 
 56 Requirements 
 57 ------------ 
 58 
 59 NeMo works with: 
 60 
 61 1) Python 3.6 or 3.7 
 62 2) Pytorch 1.6 or above 
 63 
 64 Docker containers: 
 65 ~~~~~~~~~~~~~~~~~~ 
 66 The easiest way to start training with NeMo is by using `NeMo's container <https://ngc.nvidia.com/catalog/containers/nvidia:nemo>`_. 
 67 
 68 It has all requirements and NeMo 1.0.0b1 already installed. 
 69 
 70 .. code-block:: bash 
 71 
 72     docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 
 73     -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 
 74     stack=67108864 --device=/dev/snd nvcr.io/nvidia/nemo:v1.0.0b1 
 75 
 76 
 77 If you choose to work with the main branch, we recommend using NVIDIA's PyTorch container version 20.09-py3. 
 78 
 79 .. code-block:: bash 
 80 
 81     docker run --gpus all -it --rm -v <nemo_github_folder>:/NeMo --shm-size=8g \ 
 82     -p 8888:8888 -p 6006:6006 --ulimit memlock=-1 --ulimit \ 
 83     stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:20.09-py3 
 84 
 85 
 86 Installation 
 87 ~~~~~~~~~~~~ 
 88 If you are not inside the NVIDIA docker container, please install Cython first. If you wish to either use the ASR or TTS collection, please install libsndfile1 and ffmpeg as well. 
89 90 * ``pip install Cython`` 91 * ``apt-get update && apt-get install -y libsndfile1 ffmpeg`` (If you want to install the TTS or ASR collections) 92 93 Once requirements are satisfied, simply install using pip: 94 95 * ``pip install nemo_toolkit[all]==1.0.0b1`` (latest version) 96 97 Or if you want the latest (or particular) version from GitHub: 98 99 * ``python -m pip install git+https://github.com/NVIDIA/NeMo.git@{BRANCH}#egg=nemo_toolkit[all]`` - where {BRANCH} should be replaced with the branch you want. This is recommended route if you are testing out the latest WIP version of NeMo. 100 * ``./reinstall.sh`` - from NeMo's git root. This will install the version from current branch in developement mode. 101 102 Examples 103 ~~~~~~~~ 104 ``<nemo_github_folder>/examples/`` folder contains various example scripts. Many of them look very similar and have the same arguments because 105 we used `Facebook's Hydra <https://github.com/facebookresearch/hydra>`_ for configuration. 106 107 Here is an example command which trains ASR model (QuartzNet15x5) on LibriSpeech, using 4 GPUs and mixed precision training. 108 (It assumes you are inside the container with NeMo installed) 109 110 .. code-block:: bash 111 112 root@987b39669a7e:/NeMo# python examples/asr/speech_to_text.py --config-name=quartznet_15x5 \ 113 model.train_ds.manifest_filepath=<PATH_TO_DATA>/librispeech-train-all.json \ 114 model.validation_ds.manifest_filepath=<PATH_TO_DATA>/librispeech-dev-other.json \ 115 trainer.gpus=4 trainer.max_epochs=128 model.train_ds.batch_size=64 \ 116 +trainer.precision=16 +trainer.amp_level=O1 \ 117 +model.validation_ds.num_workers=16 \ 118 +model.train_ds.num_workers=16 \ 119 +model.train_ds.pin_memory=True 120 121 #(Optional) Tensorboard: 122 tensorboard --bind_all --logdir nemo_experiments 123 124 125 126 Documentation 127 ------------- 128 129 .. 
|main| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 130 :alt: Documentation Status 131 :scale: 100% 132 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 133 134 .. |latest| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=main 135 :alt: Documentation Status 136 :scale: 100% 137 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/ 138 139 .. |stable| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=stable 140 :alt: Documentation Status 141 :scale: 100% 142 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/ 143 144 .. |v0111| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=v0.11.1 145 :alt: Documentation Status 146 :scale: 100% 147 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/v0.11.1/ 148 149 .. |v0110| image:: https://readthedocs.com/projects/nvidia-nemo/badge/?version=v0.11.0 150 :alt: Documentation Status 151 :scale: 100% 152 :target: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/v0.11.0/ 153 154 155 156 +---------+----------+---------------------------------------------------------+ 157 | Version | Status | Description | 158 +=========+==========+=========================================================+ 159 | Latest | |latest| | Documentation of the latest (i.e. `main`) branch | 160 +---------+----------+---------------------------------------------------------+ 161 | Stable | |stable| | Documentation of the stable (i.e. 
`0.11.1`) branch | 162 +---------+----------+---------------------------------------------------------+ 163 | Main | |main| | Documentation of the `main` branch | 164 +---------+----------+---------------------------------------------------------+ 165 | v0.11.1 | |v0111| | Documentation of the v0.11.1 release | 166 +---------+----------+---------------------------------------------------------+ 167 | v0.11.0 | |v0110| | Documentation of the v0.11.0 release | 168 +---------+----------+---------------------------------------------------------+ 169 170 171 Tutorials 172 --------- 173 The best way to get started with NeMo is to checkout one of our tutorials. 174 175 Most NeMo tutorials can be run on `Google's Colab <https://colab.research.google.com/notebooks/intro.ipynb>`_. 176 177 To run tutorials: 178 179 * Click on Colab link (see table below) 180 * Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 181 182 .. list-table:: *Tutorials* 183 :widths: 15 25 25 184 :header-rows: 1 185 186 * - Domain 187 - Title 188 - GitHub URL 189 * - NeMo 190 - Simple Application with NeMo 191 - `Voice swap app <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/NeMo_voice_swap_app.ipynb>`_ 192 * - NeMo 193 - Exploring NeMo Fundamentals 194 - `NeMo primer <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/00_NeMo_Primer.ipynb>`_ 195 * - NeMo Models 196 - Exploring NeMo Model Construction 197 - `NeMo models <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/01_NeMo_Models.ipynb>`_ 198 * - ASR 199 - ASR with NeMo 200 - `ASR with NeMo <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/asr/01_ASR_with_NeMo.ipynb>`_ 201 * - ASR 202 - Speech Commands 203 - `Speech commands <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/asr/03_Speech_Commands.ipynb>`_ 204 * - ASR 205 - Speaker Recognition and Verification 206 - `Speaker 
Recognition and Verification <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/speaker_recognition/Speaker_Recognition_Verification.ipynb>`_ 
 207 * - ASR 
 208 - Online Noise Augmentation 
 209 - `Online noise augmentation <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/asr/05_Online_Noise_Augmentation.ipynb>`_ 
 210 * - NLP 
 211 - Using Pretrained Language Models for Downstream Tasks 
 212 - `Pretrained language models for downstream tasks <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb>`_ 
 213 * - NLP 
 214 - Exploring NeMo NLP Tokenizers 
 215 - `NLP tokenizers <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/02_NLP_Tokenizers.ipynb>`_ 
 216 * - NLP 
 217 - Text Classification (Sentiment Analysis) with BERT 
 218 - `Text Classification (Sentiment Analysis) <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb>`_ 
 219 * - NLP 
 220 - Question answering with SQuAD 
 221 - `Question answering Squad <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Question_Answering_Squad.ipynb>`_ 
 222 * - NLP 
 223 - Token Classification (Named Entity Recognition) 
 224 - `Token classification: named entity recognition <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb>`_ 
 225 * - NLP 
 226 - Joint Intent Classification and Slot Filling 
 227 - `Joint Intent and Slot Classification <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb>`_ 
 228 * - NLP 
 229 - GLUE Benchmark 
 230 - `GLUE benchmark <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/GLUE_Benchmark.ipynb>`_ 
 231 * - NLP 
 232 - Punctuation and Capitalization 
 233 - `Punctuation and capitalization 
<https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Punctuation_and_Capitalization.ipynb>`_ 234 * - NLP 235 - Named Entity Recognition - BioMegatron 236 - `Named Entity Recognition - BioMegatron <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Token_Classification-BioMegatron.ipynb>`_ 237 * - NLP 238 - Relation Extraction - BioMegatron 239 - `Relation Extraction - BioMegatron <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb>`_ 240 241 * - TTS 242 - Speech Synthesis 243 - `TTS inference <https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/tts/1_TTS_inference.ipynb>`_ 244 245 Contributing 246 ------------ 247 248 We welcome community contributions! Please refer to the CONTRIBUTING.md for the process. 249 250 License 251 ------- 252 NeMo is under Apache 2.0 license. 253 [end of README.rst] [start of examples/asr/speech_to_text_infer.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """ 16 This script serves three goals: 17 (1) Demonstrate how to use NeMo Models outside of PytorchLightning 18 (2) Shows example of batch ASR inference 19 (3) Serves as CI test for pre-trained checkpoint 20 """ 21 22 from argparse import ArgumentParser 23 24 import torch 25 26 from nemo.collections.asr.metrics.wer import WER, word_error_rate 27 from nemo.collections.asr.models import EncDecCTCModel 28 from nemo.utils import logging 29 30 try: 31 from torch.cuda.amp import autocast 32 except ImportError: 33 from contextlib import contextmanager 34 35 @contextmanager 36 def autocast(enabled=None): 37 yield 38 39 40 can_gpu = torch.cuda.is_available() 41 42 43 def main(): 44 parser = ArgumentParser() 45 parser.add_argument( 46 "--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'", 47 ) 48 parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data") 49 parser.add_argument("--batch_size", type=int, default=4) 50 parser.add_argument("--wer_tolerance", type=float, default=1.0, help="used by test") 51 parser.add_argument( 52 "--normalize_text", default=True, type=bool, help="Normalize transcripts or not. Set to False for non-English." 
53 ) 54 args = parser.parse_args() 55 torch.set_grad_enabled(False) 56 57 if args.asr_model.endswith('.nemo'): 58 logging.info(f"Using local ASR model from {args.asr_model}") 59 # TODO: Remove strict, when lightning has persistent parameter support for add_state() 60 asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, strict=False) 61 else: 62 logging.info(f"Using NGC cloud ASR model {args.asr_model}") 63 # TODO: Remove strict, when lightning has persistent parameter support for add_state() 64 asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, strict=False) 65 asr_model.setup_test_data( 66 test_data_config={ 67 'sample_rate': 16000, 68 'manifest_filepath': args.dataset, 69 'labels': asr_model.decoder.vocabulary, 70 'batch_size': args.batch_size, 71 'normalize_transcripts': args.normalize_text, 72 } 73 ) 74 if can_gpu: 75 asr_model = asr_model.cuda() 76 asr_model.eval() 77 labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))]) 78 wer = WER(vocabulary=asr_model.decoder.vocabulary) 79 hypotheses = [] 80 references = [] 81 for test_batch in asr_model.test_dataloader(): 82 if can_gpu: 83 test_batch = [x.cuda() for x in test_batch] 84 with autocast(): 85 log_probs, encoded_len, greedy_predictions = asr_model( 86 input_signal=test_batch[0], input_signal_length=test_batch[1] 87 ) 88 hypotheses += wer.ctc_decoder_predictions_tensor(greedy_predictions) 89 for batch_ind in range(greedy_predictions.shape[0]): 90 reference = ''.join([labels_map[c] for c in test_batch[2][batch_ind].cpu().detach().numpy()]) 91 references.append(reference) 92 del test_batch 93 wer_value = word_error_rate(hypotheses=hypotheses, references=references) 94 if wer_value > args.wer_tolerance: 95 raise ValueError(f"Got WER of {wer_value}. It was higher than {args.wer_tolerance}") 96 logging.info(f'Got WER of {wer_value}. 
Tolerance was {args.wer_tolerance}') 97 98 99 if __name__ == '__main__': 100 main() # noqa pylint: disable=no-value-for-parameter 101 [end of examples/asr/speech_to_text_infer.py] [start of examples/nlp/token_classification/punctuation_capitalization.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import pytorch_lightning as pl 16 from omegaconf import DictConfig, OmegaConf 17 18 from nemo.collections.nlp.models import PunctuationCapitalizationModel 19 from nemo.core.config import hydra_runner 20 from nemo.utils import logging 21 from nemo.utils.exp_manager import exp_manager 22 23 24 """ 25 To run this script and train the model from scratch, use: 26 python punctuation_and_capitalization.py \ 27 model.dataset.data_dir=PATH_TO_DATA_DIR 28 29 To use one of the pretrained versions of the model, run: 30 python punctuation_and_capitalization.py \ 31 pretrained_model=Punctuation_Capitalization_with_BERT 32 33 To use one of the pretrained versions of the model and finetune it, run: 34 python punctuation_and_capitalization.py \ 35 pretrained_model=Punctuation_Capitalization_with_BERT \ 36 model.dataset.data_dir=PATH_TO_DATA_DIR 37 38 More details on the task and data format could be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb 39 """ 40 41 42 @hydra_runner(config_path="conf", config_name="punctuation_capitalization_config") 43 def main(cfg: DictConfig) -> None: 44 trainer 
= pl.Trainer(**cfg.trainer) 
 45 exp_manager(trainer, cfg.get("exp_manager", None)) 
 46 do_training = True 
 47 if not cfg.pretrained_model: 
 48 logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') 
 49 model = PunctuationCapitalizationModel(cfg.model, trainer=trainer) 
 50 else: 
 51 logging.info(f'Loading pretrained model {cfg.pretrained_model}') 
 52 # TODO: Remove strict, when lightning has persistent parameter support for add_state() 
 53 model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model, strict=False) 
 54 data_dir = cfg.model.dataset.get('data_dir', None) 
 55 if data_dir: 
 56 # we can also do finetuning of the pretrained model but it will require 
 57 # setting up train and validation Pytorch DataLoaders 
 58 model.setup_training_data(data_dir=data_dir) 
 59 # evaluation could be done on multiple files, use model.validation_ds.ds_items to specify multiple 
 60 # data directories if needed 
 61 model.setup_validation_data(data_dirs=data_dir) 
 62 logging.info(f'Using config file of the pretrained model') 
 63 else: 
 64 do_training = False 
 65 logging.info( 
 66 f'Data dir should be specified for training/finetuning. ' 
 67 f'Using pretrained {cfg.pretrained_model} model weights and skipping finetuning.' 
68 ) 69 70 if do_training: 71 trainer.fit(model) 72 if cfg.model.nemo_path: 73 model.save_to(cfg.model.nemo_path) 74 75 logging.info( 76 'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU ' 77 'and no DDP to obtain accurate results' 78 ) 79 gpu = 1 if cfg.trainer.gpus != 0 else 0 80 trainer = pl.Trainer(gpus=gpu) 81 model.set_trainer(trainer) 82 83 # run an inference on a few examples 84 queries = [ 85 'we bought four shirts one pen and a mug from the nvidia gear store in santa clara', 86 'what can i do for you today', 87 'how are you', 88 ] 89 inference_results = model.add_punctuation_capitalization(queries) 90 91 for query, result in zip(queries, inference_results): 92 logging.info(f'Query : {query}') 93 logging.info(f'Result: {result.strip()}\n') 94 95 96 if __name__ == '__main__': 97 main() 98 [end of examples/nlp/token_classification/punctuation_capitalization.py] [start of examples/nlp/token_classification/token_classification.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 
 15 import os 
 16 
 17 import pytorch_lightning as pl 
 18 from omegaconf import DictConfig, OmegaConf 
 19 
 20 from nemo.collections.nlp.models import TokenClassificationModel 
 21 from nemo.core.config import hydra_runner 
 22 from nemo.utils import logging 
 23 from nemo.utils.exp_manager import exp_manager 
 24 
 25 
 26 """ 
 27 ## Tasks 
 28 Token Classification script supports Named Entity Recognition task and other token level classification tasks, 
 29 as long as the data follows the format specified below. 
 30 
 31 Token Classification Model requires the data to be split into 2 files: text.txt and labels.txt. 
 32 Each line of the text.txt file contains text sequences, where words are separated with spaces, i.e.: 
 33 [WORD] [SPACE] [WORD] [SPACE] [WORD]. 
 34 The labels.txt file contains corresponding labels for each word in text.txt, the labels are separated with spaces, i.e.: 
 35 [LABEL] [SPACE] [LABEL] [SPACE] [LABEL]. 
 36 
 37 Example of a text.txt file: 
 38 Jennifer is from New York City . 
 39 She likes ... 
 40 ... 
 41 
 42 Corresponding labels.txt file: 
 43 B-PER O O B-LOC I-LOC I-LOC O 
 44 O O ... 
 45 ... 
46 47 48 ## Preparing the dataset 49 To convert an IOB format data to the format required for training, run 50 examples/nlp/token_classification/data/import_from_iob_format.py on your train and dev files, as follows: 51 52 python examples/nlp/token_classification/data/import_from_iob_format.py --data_file PATH_TO_IOB_FORMAT_DATAFILE 53 54 55 ## Model Training 56 57 To train TokenClassification model from scratch with the default config file, run: 58 59 python token_classification.py \ 60 model.dataset.data_dir=<PATH_TO_DATA_DIR> \ 61 trainer.max_epochs=<NUM_EPOCHS> \ 62 trainer.gpus="[<CHANGE_TO_GPU_YOU_WANT_TO_USE>] 63 64 To use one of the pretrained versions of the model, run: 65 python token_classification.py \ 66 pretrained_model=NERModel 67 68 To use one of the pretrained versions of the model and finetune it, run: 69 python token_classification.py \ 70 model.dataset.data_dir=<PATH_TO_DATA_DIR> \ 71 pretrained_model=NERModel 72 73 More details on how to use this script could be found in 74 tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb 75 """ 76 77 78 @hydra_runner(config_path="conf", config_name="token_classification_config") 79 def main(cfg: DictConfig) -> None: 80 trainer = pl.Trainer(**cfg.trainer) 81 exp_dir = exp_manager(trainer, cfg.get("exp_manager", None)) 82 do_training = True 83 if not cfg.pretrained_model: 84 logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') 85 model = TokenClassificationModel(cfg.model, trainer=trainer) 86 else: 87 logging.info(f'Loading pretrained model {cfg.pretrained_model}') 88 # TODO: Remove strict, when lightning has persistent parameter support for add_state() 89 model = TokenClassificationModel.from_pretrained(cfg.pretrained_model, strict=False) 90 91 data_dir = cfg.model.dataset.get('data_dir', None) 92 if data_dir: 93 # we can also do finetunining of the pretrained model but it will require 94 # setting up train and validation Pytorch DataLoaders 95 # setup the data dir to get class weights statistics 
96 model.update_data_dir(data_dir=data_dir) 97 # then we're setting up loss, use model.dataset.class_balancing, 98 # if you want to add class weights to the CrossEntropyLoss 99 model.setup_loss(class_balancing=cfg.model.dataset.class_balancing) 100 # finally, setup train and validation Pytorch DataLoaders 101 model.setup_training_data() 102 model.setup_validation_data() 103 logging.info(f'Using config file of the pretrained model') 104 else: 105 do_training = False 106 logging.info( 107 f'Data dir should be specified for finetuning the pretrained model. ' 108 f'Using pretrained {cfg.pretrained_model} model weights and skipping finetuning.' 109 ) 110 111 if do_training: 112 trainer.fit(model) 113 if cfg.model.nemo_path: 114 model.save_to(cfg.model.nemo_path) 115 116 """ 117 After model training is done, you can use the model for inference. 118 You can either evaluate data from a text_file that follows training data format, 119 or provide a list of queries you want to add entities to 120 121 During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU 122 and no DDP to obtain accurate results 123 """ 124 logging.info( 125 'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU ' 126 'and no DDP to obtain accurate results' 127 ) 128 gpu = 1 if cfg.trainer.gpus != 0 else 0 129 trainer = pl.Trainer(gpus=gpu) 130 model.set_trainer(trainer) 131 132 if do_training: 133 # run evaluation on a dataset from file 134 # only possible if model.dataset.data_dir is specified 135 # change the path to the file you want to use for the final evaluation 136 model.evaluate_from_file( 137 text_file=os.path.join(cfg.model.dataset.data_dir, cfg.model.validation_ds.text_file), 138 labels_file=os.path.join(cfg.model.dataset.data_dir, cfg.model.validation_ds.labels_file), 139 output_dir=exp_dir, 140 add_confusion_matrix=True, 141 normalize_confusion_matrix=True, 142 ) 143 144 # run an inference on a few examples 145 
queries = ['we bought four shirts from the nvidia gear store in santa clara.', 'Nvidia is a company.'] 146 results = model.add_predictions(queries) 147 148 for query, result in zip(queries, results): 149 logging.info(f'Query : {query}') 150 logging.info(f'Result: {result.strip()}\n') 151 152 153 if __name__ == '__main__': 154 main() 155 [end of examples/nlp/token_classification/token_classification.py] [start of nemo/collections/asr/metrics/wer.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import List 16 17 import editdistance 18 import torch 19 from pytorch_lightning.metrics import Metric 20 21 from nemo.utils import logging 22 23 __all__ = ['word_error_rate', 'WER'] 24 25 26 def word_error_rate(hypotheses: List[str], references: List[str], use_cer=False) -> float: 27 """ 28 Computes Average Word Error rate between two texts represented as 29 corresponding lists of string. Hypotheses and references must have same 30 length. 31 Args: 32 hypotheses: list of hypotheses 33 references: list of references 34 use_cer: bool, set True to enable cer 35 Returns: 36 (float) average word error rate 37 """ 38 scores = 0 39 words = 0 40 if len(hypotheses) != len(references): 41 raise ValueError( 42 "In word error rate calculation, hypotheses and reference" 43 " lists must have the same number of elements. 
But I got:" 
 44 "{0} and {1} correspondingly".format(len(hypotheses), len(references)) 
 45 ) 
 46 for h, r in zip(hypotheses, references): 
 47 if use_cer: 
 48 h_list = list(h) 
 49 r_list = list(r) 
 50 else: 
 51 h_list = h.split() 
 52 r_list = r.split() 
 53 words += len(r_list) 
 54 scores += editdistance.eval(h_list, r_list) 
 55 if words != 0: 
 56 wer = 1.0 * scores / words 
 57 else: 
 58 wer = float('inf') 
 59 return wer 
 60 
 61 
 62 class WER(Metric): 
 63 """ 
 64 This metric computes numerator and denominator for Overall Word Error Rate (WER) between prediction and reference texts. 
 65 When doing distributed training/evaluation the result of res=WER(predictions, targets, target_lengths) calls 
 66 will be all-reduced between all workers using SUM operations. 
 67 Here contains two numbers res=[wer_numerator, wer_denominator]. WER=wer_numerator/wer_denominator. 
 68 
 69 If used with PytorchLightning LightningModule, include wer_numerator and wer_denominators inside validation_step results. 
 70 Then aggregate (sum) them at the end of validation epoch to correctly compute validation WER. 
 71 
 72 Example: 
 73 def validation_step(self, batch, batch_idx): 
 74 ... 
 75 wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len) 
 76 return {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom} 
 77 
 78 def validation_epoch_end(self, outputs): 
 79 ... 
 80 wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum() 
 81 wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum() 
 82 tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom} 
 83 return {'val_loss': val_loss_mean, 'log': tensorboard_logs} 
 84 
 85 Args: 
 86 vocabulary: List of strings that describes the vocabulary of the dataset. 
 87 batch_dim_index: Index of the batch dimension. 
 88 use_cer: Whether to use Character Error Rate instead of Word Error Rate. 
 89 ctc_decode: Whether to use CTC decoding or not. Currently, must be set. 
90 log_prediction: Whether to log a single decoded sample per call. 91 92 Returns: 93 res: a torch.Tensor object with two elements: [wer_numerator, wer_denominator]. To correctly compute average 94 text word error rate, compute wer=wer_numerator/wer_denominator 95 """ 96 97 def __init__( 98 self, 99 vocabulary, 100 batch_dim_index=0, 101 use_cer=False, 102 ctc_decode=True, 103 log_prediction=True, 104 dist_sync_on_step=False, 105 ): 106 super().__init__(dist_sync_on_step=dist_sync_on_step) 107 self.batch_dim_index = batch_dim_index 108 self.blank_id = len(vocabulary) 109 self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))]) 110 self.use_cer = use_cer 111 self.ctc_decode = ctc_decode 112 self.log_prediction = log_prediction 113 114 self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum') 115 self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum') 116 117 def ctc_decoder_predictions_tensor(self, predictions: torch.Tensor) -> List[str]: 118 """ 119 Decodes a sequence of labels to words 120 """ 121 hypotheses = [] 122 # Drop predictions to CPU 123 prediction_cpu_tensor = predictions.long().cpu() 124 # iterate over batch 125 for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]): 126 prediction = prediction_cpu_tensor[ind].detach().numpy().tolist() 127 # CTC decoding procedure 128 decoded_prediction = [] 129 previous = self.blank_id 130 for p in prediction: 131 if (p != previous or previous == self.blank_id) and p != self.blank_id: 132 decoded_prediction.append(p) 133 previous = p 134 hypothesis = ''.join([self.labels_map[c] for c in decoded_prediction]) 135 hypotheses.append(hypothesis) 136 return hypotheses 137 138 def update(self, predictions: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor) -> torch.Tensor: 139 words = 0.0 140 scores = 0.0 141 references = [] 142 with torch.no_grad(): 143 # prediction_cpu_tensor = tensors[0].long().cpu() 144 targets_cpu_tensor = targets.long().cpu() 
145 tgt_lenths_cpu_tensor = target_lengths.long().cpu() 146 147 # iterate over batch 148 for ind in range(targets_cpu_tensor.shape[self.batch_dim_index]): 149 tgt_len = tgt_lenths_cpu_tensor[ind].item() 150 target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist() 151 reference = ''.join([self.labels_map[c] for c in target]) 152 references.append(reference) 153 if self.ctc_decode: 154 hypotheses = self.ctc_decoder_predictions_tensor(predictions) 155 else: 156 raise NotImplementedError("Implement me if you need non-CTC decode on predictions") 157 158 if self.log_prediction: 159 logging.info(f"\n") 160 logging.info(f"reference:{references[0]}") 161 logging.info(f"decoded :{hypotheses[0]}") 162 163 for h, r in zip(hypotheses, references): 164 if self.use_cer: 165 h_list = list(h) 166 r_list = list(r) 167 else: 168 h_list = h.split() 169 r_list = r.split() 170 words += len(r_list) 171 # Compute Levenstein's distance 172 scores += editdistance.eval(h_list, r_list) 173 174 self.scores = torch.tensor(scores).to(predictions.device) 175 self.words = torch.tensor(words).to(predictions.device) 176 # return torch.tensor([scores, words]).to(predictions.device) 177 178 def compute(self): 179 return self.scores / self.words 180 [end of nemo/collections/asr/metrics/wer.py] [start of nemo/collections/asr/metrics/wer_bpe.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from typing import List 16 17 import editdistance 18 import torch 19 from pytorch_lightning.metrics import Metric 20 21 from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec 22 from nemo.utils import logging 23 24 25 class WERBPE(Metric): 26 """ 27 This metric computes numerator and denominator for Overall Word Error Rate for BPE tokens (WER-BPE) between prediction and reference texts. 28 When doing distributed training/evaluation the result of res=WERBPE(predictions, targets, target_lengths) calls 29 will be all-reduced between all workers using SUM operations. 30 Here contains two numbers res=[wer_numerator, wer_denominator]. WERBPE=wer_numerator/wer_denominator. 31 32 If used with PytorchLightning LightningModule, include wer_numerator and wer_denominators inside validation_step results. 33 Then aggregate (sum) then at the end of validation epoch to correctly compute validation WER. 34 35 Example: 36 def validation_step(self, batch, batch_idx): 37 ... 38 wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len) 39 return {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom} 40 41 def validation_epoch_end(self, outputs): 42 ... 43 wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum() 44 wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum() 45 tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom} 46 return {'val_loss': val_loss_mean, 'log': tensorboard_logs} 47 48 Args: 49 vocabulary: NeMo tokenizer object, which inherits from TokenizerSpec. 50 batch_dim_index: Index of the batch dimension. 51 use_cer: Whether to compute word-error-rate or character-error-rate. 52 ctc_decode: Whether to perform CTC decode. 53 log_prediction: Whether to log a single decoded sample per call. 54 55 Returns: 56 res: a torch.Tensor object with two elements: [wer_numerator, wer_denominators]. 
To correctly compute average 57 text word error rate, compute wer=wer_numerator/wer_denominators 58 """ 59 60 def __init__( 61 self, 62 tokenizer: TokenizerSpec, 63 batch_dim_index=0, 64 use_cer=False, 65 ctc_decode=True, 66 log_prediction=True, 67 dist_sync_on_step=False, 68 ): 69 super().__init__(dist_sync_on_step=dist_sync_on_step) 70 self.tokenizer = tokenizer 71 self.batch_dim_index = batch_dim_index 72 self.blank_id = tokenizer.tokenizer.vocab_size 73 self.use_cer = use_cer 74 self.ctc_decode = ctc_decode 75 self.log_prediction = log_prediction 76 77 self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum') 78 self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum') 79 80 def ctc_decoder_predictions_tensor(self, predictions: torch.Tensor) -> List[str]: 81 """ 82 Decodes a sequence of labels to words 83 """ 84 hypotheses = [] 85 # Drop predictions to CPU 86 prediction_cpu_tensor = predictions.long().cpu() 87 # iterate over batch 88 for ind in range(prediction_cpu_tensor.shape[self.batch_dim_index]): 89 prediction = prediction_cpu_tensor[ind].detach().numpy().tolist() 90 # CTC decoding procedure 91 decoded_prediction = [] 92 previous = self.blank_id 93 for p in prediction: 94 if (p != previous or previous == self.blank_id) and p != self.blank_id: 95 decoded_prediction.append(p) 96 previous = p 97 hypothesis = self.tokenizer.ids_to_text(decoded_prediction) 98 hypotheses.append(hypothesis) 99 return hypotheses 100 101 def update(self, predictions: torch.Tensor, targets: torch.Tensor, target_lengths: torch.Tensor): 102 words = 0.0 103 scores = 0.0 104 references = [] 105 with torch.no_grad(): 106 # prediction_cpu_tensor = tensors[0].long().cpu() 107 targets_cpu_tensor = targets.long().cpu() 108 tgt_lenths_cpu_tensor = target_lengths.long().cpu() 109 110 # iterate over batch 111 for ind in range(targets_cpu_tensor.shape[self.batch_dim_index]): 112 tgt_len = tgt_lenths_cpu_tensor[ind].item() 113 target = 
targets_cpu_tensor[ind][:tgt_len].numpy().tolist() 114 reference = self.tokenizer.ids_to_text(target) 115 references.append(reference) 116 if self.ctc_decode: 117 hypotheses = self.ctc_decoder_predictions_tensor(predictions) 118 else: 119 raise NotImplementedError("Implement me if you need non-CTC decode on predictions") 120 121 if self.log_prediction: 122 logging.info(f"\n") 123 logging.info(f"reference:{references[0]}") 124 logging.info(f"decoded :{hypotheses[0]}") 125 126 for h, r in zip(hypotheses, references): 127 if self.use_cer: 128 h_list = list(h) 129 r_list = list(r) 130 else: 131 h_list = h.split() 132 r_list = r.split() 133 words += len(r_list) 134 # Compute Levenstein's distance 135 scores += editdistance.eval(h_list, r_list) 136 137 self.scores = torch.tensor(scores).to(predictions.device) 138 self.words = torch.tensor(words).to(predictions.device) 139 # return torch.tensor([scores, words]).to(predictions.device) 140 141 def compute(self): 142 return self.scores / self.words 143 [end of nemo/collections/asr/metrics/wer_bpe.py] [start of nemo/collections/common/metrics/classification_accuracy.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import torch 16 from pytorch_lightning.metrics import Metric 17 18 __all__ = ['TopKClassificationAccuracy'] 19 20 21 class TopKClassificationAccuracy(Metric): 22 """ 23 This metric computes numerator and denominator for Overall Accuracy between logits and labels. 24 When doing distributed training/evaluation the result of res=TopKClassificationAccuracy(logits, labels) calls 25 will be all-reduced between all workers using SUM operations. 26 Here contains two numbers res=[correctly_predicted, total_samples]. Accuracy=correctly_predicted/total_samples. 27 28 If used with PytorchLightning LightningModule, include correct_count and total_count inside validation_step results. 29 Then aggregate (sum) then at the end of validation epoch to correctly compute validation WER. 30 31 Example: 32 def validation_step(self, batch, batch_idx): 33 ... 34 correct_count, total_count = self._accuracy(logits, labels) 35 return {'val_loss': loss_value, 'val_correct_count': correct_count, 'val_total_count': total_count} 36 37 def validation_epoch_end(self, outputs): 38 ... 39 val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean() 40 correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]) 41 total_counts = torch.stack([x['val_total_counts'] for x in outputs]) 42 43 topk_scores = compute_topk_accuracy(correct_counts, total_counts) 44 45 tensorboard_log = {'val_loss': val_loss_mean} 46 for top_k, score in zip(self._accuracy.top_k, topk_scores): 47 tensorboard_log['val_epoch_top@{}'.format(top_k)] = score 48 49 return {'log': tensorboard_log} 50 51 Args: 52 top_k: Optional list of integers. Defaults to [1]. 53 54 Returns: 55 res: a torch.Tensor object with two elements: [correct_count, total_count]. 
To correctly compute average 56 accuracy, compute acc=correct_count/total_count 57 """ 58 59 def __init__(self, top_k=None, dist_sync_on_step=False): 60 super().__init__(dist_sync_on_step=dist_sync_on_step) 61 62 if top_k is None: 63 top_k = [1] 64 65 self.top_k = top_k 66 self.add_state("correct_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum') 67 self.add_state("total_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum') 68 69 def update(self, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: 70 with torch.no_grad(): 71 max_k = max(self.top_k) 72 73 _, predictions = logits.topk(max_k, dim=1, largest=True, sorted=True) 74 predictions = predictions.t() 75 correct = predictions.eq(labels.view(1, -1)).expand_as(predictions) 76 77 correct_counts_k = [] 78 total_counts_k = [] 79 80 for k in self.top_k: 81 correct_k = correct[:k].view(-1).float().sum() 82 total_k = labels.shape[0] 83 84 correct_counts_k.append(correct_k) 85 total_counts_k.append(total_k) 86 87 self.correct_counts_k = torch.tensor(correct_counts_k, dtype=labels.dtype, device=labels.device) 88 self.total_counts_k = torch.tensor(total_counts_k, dtype=labels.dtype, device=labels.device) 89 90 def compute(self): 91 """ 92 Computes the top-k accuracy. 93 94 Returns: 95 A list of length `K`, such that k-th index corresponds to top-k accuracy 96 over all distributed processes. 97 """ 98 if not len(self.correct_counts_k) == len(self.top_k) == len(self.total_counts_k): 99 raise ValueError("length of counts must match to topk length") 100 101 if self.top_k == [1]: 102 return [self.correct_counts_k.float() / self.total_counts_k] 103 104 else: 105 top_k_scores = compute_topk_accuracy(self.correct_counts_k, self.total_counts_k) 106 107 return top_k_scores 108 109 110 def compute_topk_accuracy(correct_counts_k, total_counts_k): 111 """ 112 Computes the top-k accuracy 113 Args: 114 correct_counts: Tensor of shape [K], K being the top-k parameter. 
115 total_counts: Tensor of shape [K], and K being the top-k parameter. 116 Returns: 117 A list of length `K`, such that k-th index corresponds to top-k accuracy 118 over all distributed processes. 119 """ 120 top_k_scores = [] 121 122 for ki in range(len(correct_counts_k)): 123 correct_count = correct_counts_k[ki].item() 124 total_count = total_counts_k[ki].item() 125 top_k_scores.append(correct_count / float(total_count)) 126 127 return top_k_scores 128 [end of nemo/collections/common/metrics/classification_accuracy.py] [start of nemo/collections/nlp/metrics/classification_report.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Any, Dict, Optional 16 17 import torch 18 from pytorch_lightning.metrics import Metric 19 from pytorch_lightning.metrics.utils import METRIC_EPS 20 21 __all__ = ['ClassificationReport'] 22 23 24 class ClassificationReport(Metric): 25 """ 26 This metric computes the number of True Positive, False Negative, and False Positive examples per class. 27 When doing distributed training/evaluation the result of res=ClassificationReport(predictions, labels) calls 28 will be all-reduced between all workers using SUM operations. 29 30 If used with PytorchLightning LightningModule, include TPs, FNs, and FPs inside validation_step results. 
31 Then aggregate them at the end of validation epoch to correctly compute validation precision, recall, f1 32 using get_precision_recall_f1(). 33 34 Example: 35 def validation_step(self, batch, batch_idx): 36 ... 37 tp, fn, fp, _ = self.classification_report(preds, labels) 38 39 return {'val_loss': val_loss, 'tp': tp, 'fn': fn, 'fp': fp} 40 41 def validation_epoch_end(self, outputs): 42 ... 43 # calculate metrics and classification report 44 precision, recall, f1, report = self.classification_report.compute() 45 46 logging.info(report) 47 48 self.log('val_loss', avg_loss, prog_bar=True) 49 self.log('precision', precision) 50 self.log('f1', f1) 51 self.log('recall', recall) 52 53 Args: 54 num_classes: number of classes in the dataset 55 label_ids (optional): label name to label id mapping 56 mode: how to compute the average 57 dist_sync_on_step: sync across ddp 58 process_group: which processes to sync across 59 Return: 60 aggregated precision, recall, f1, report 61 """ 62 63 def __init__( 64 self, 65 num_classes: int, 66 label_ids: Dict[str, int] = None, 67 mode: str = 'macro', 68 dist_sync_on_step: bool = False, 69 process_group: Optional[Any] = None, 70 ): 71 super().__init__(dist_sync_on_step=dist_sync_on_step, process_group=process_group) 72 self.num_classes = num_classes 73 if label_ids: 74 self.ids_to_labels = {v: k for k, v in label_ids.items()} 75 else: 76 self.ids_to_labels = None 77 self.mode = mode 78 79 self.add_state("tp", default=torch.zeros(num_classes), dist_reduce_fx='sum') 80 self.add_state("fn", default=torch.zeros(num_classes), dist_reduce_fx='sum') 81 self.add_state("fp", default=torch.zeros(num_classes), dist_reduce_fx='sum') 82 self.add_state("num_examples_per_class", default=torch.zeros(num_classes), dist_reduce_fx='sum') 83 84 def update(self, predictions: torch.Tensor, labels: torch.Tensor): 85 TP = [] 86 FN = [] 87 FP = [] 88 for label_id in range(self.num_classes): 89 current_label = labels == label_id 90 label_predicted = predictions 
== label_id 91 92 TP.append((label_predicted == current_label)[label_predicted].sum()) 93 FP.append((label_predicted != current_label)[label_predicted].sum()) 94 FN.append((label_predicted != current_label)[current_label].sum()) 95 96 tp = torch.tensor(TP).to(predictions.device) 97 fn = torch.tensor(FN).to(predictions.device) 98 fp = torch.tensor(FP).to(predictions.device) 99 num_examples_per_class = tp + fn 100 101 self.tp += tp 102 self.fn += fn 103 self.fp += fp 104 self.num_examples_per_class += num_examples_per_class 105 106 def compute(self): 107 """ 108 Aggregates and then calculates logs classification report similar to sklearn.metrics.classification_report. 109 Typically used during epoch_end. 110 Return: 111 aggregated precision, recall, f1, report 112 """ 113 total_examples = torch.sum(self.num_examples_per_class) 114 num_non_empty_classes = torch.nonzero(self.num_examples_per_class).size(0) 115 116 precision = torch.true_divide(self.tp * 100, (self.tp + self.fp + METRIC_EPS)) 117 recall = torch.true_divide(self.tp * 100, (self.tp + self.fn + METRIC_EPS)) 118 f1 = torch.true_divide(2 * precision * recall, (precision + recall + METRIC_EPS)) 119 120 report = '\n{:50s} {:10s} {:10s} {:10s} {:10s}'.format('label', 'precision', 'recall', 'f1', 'support') 121 for i in range(len(self.tp)): 122 label = f'label_id: {i}' 123 if self.ids_to_labels and i in self.ids_to_labels: 124 label = f'{self.ids_to_labels[i]} ({label})' 125 126 report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format( 127 label, precision[i], recall[i], f1[i], self.num_examples_per_class[i] 128 ) 129 130 micro_precision = torch.true_divide(torch.sum(self.tp) * 100, torch.sum(self.tp + self.fp) + METRIC_EPS) 131 micro_recall = torch.true_divide(torch.sum(self.tp) * 100, torch.sum(self.tp + self.fn) + METRIC_EPS) 132 micro_f1 = torch.true_divide(2 * micro_precision * micro_recall, (micro_precision + micro_recall + METRIC_EPS)) 133 134 macro_precision = torch.sum(precision) / 
num_non_empty_classes 135 macro_recall = torch.sum(recall) / num_non_empty_classes 136 macro_f1 = torch.sum(f1) / num_non_empty_classes 137 weighted_precision = torch.sum(precision * self.num_examples_per_class) / total_examples 138 weighted_recall = torch.sum(recall * self.num_examples_per_class) / total_examples 139 weighted_f1 = torch.sum(f1 * self.num_examples_per_class) / total_examples 140 141 report += "\n-------------------" 142 143 report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format( 144 'micro avg', micro_precision, micro_recall, micro_f1, total_examples 145 ) 146 147 report += '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format( 148 'macro avg', macro_precision, macro_recall, macro_f1, total_examples 149 ) 150 report += ( 151 '\n{:50s} {:8.2f} {:8.2f} {:8.2f} {:8.0f}'.format( 152 'weighted avg', weighted_precision, weighted_recall, weighted_f1, total_examples 153 ) 154 + '\n' 155 ) 156 157 if self.mode == 'macro': 158 return macro_precision, macro_recall, macro_f1, report 159 elif self.mode == 'weighted': 160 return weighted_precision, weighted_recall, weighted_f1, report 161 elif self.mode == 'micro': 162 return micro_precision, micro_recall, micro_f1, report 163 elif self.mode == 'all': 164 return precision, recall, f1, report 165 else: 166 raise ValueError( 167 f'{self.mode} mode is not supported. Choose "macro" to get aggregated numbers \ 168 or "all" to get values for each class.' 169 ) 170 [end of nemo/collections/nlp/metrics/classification_report.py] [start of nemo/collections/nlp/metrics/perplexity.py] 1 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Dict 16 17 import torch 18 from pytorch_lightning.metrics import Metric 19 20 from nemo.utils import logging 21 22 __all__ = ['Perplexity'] 23 24 25 class Perplexity(Metric): 26 """ 27 This metric computes the perplexity given the language model loss. 28 """ 29 30 def __init__(self, dist_sync_on_step=False): 31 super().__init__(dist_sync_on_step=dist_sync_on_step) 32 self.add_state('perplexity', default=torch.tensor(0), dist_reduce_fx='mean') 33 34 def update(self, loss: torch.Tensor): 35 self.perplexity = torch.exp(loss) 36 37 def compute(self): 38 return self.perplexity 39 [end of nemo/collections/nlp/metrics/perplexity.py] </code> Here is an example of a patch file. It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. 
<patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
NVIDIA/NeMo
5cf042856bf718d27233a7538e0b094ce576d5c4
Loading NLP and ASR models might result in `Missing key(s) in state_dict` error **Describe the bug** After #1278 is merged, any model that uses Metrics (WERBPE, WER, TopKClassificationAccuracy, ClassificationReport, Perplexity) might throw an error like: ``` RuntimeError: Error(s) in loading state_dict for PunctuationCapitalizationModel: Missing key(s) in state_dict: "punct_class_report.tp", "punct_class_report.fn", "punct_class_report.fp", "capit_class_report.tp", "capit_class_report.fn", "capit_class_report.fp". ``` **Steps/Code to reproduce bug** N/A **Expected behavior** N/A **Environment overview (please complete the following information)** N/A **Environment details** N/A **Additional context** The current work-around is to pass strict=False to either NeMo's Model.from_pretrained(..., strict=False) NeMo's ModelPT.load_from_checkpoint(..., strict=False) Lightnings's LightningModule.load_from_checkpoint(..., strict=False) Torch's Module.load_state_dict(..., strict=False)
2020-10-21T20:01:26Z
<patch> diff --git a/examples/asr/speech_to_text_infer.py b/examples/asr/speech_to_text_infer.py --- a/examples/asr/speech_to_text_infer.py +++ b/examples/asr/speech_to_text_infer.py @@ -56,12 +56,10 @@ def main(): if args.asr_model.endswith('.nemo'): logging.info(f"Using local ASR model from {args.asr_model}") - # TODO: Remove strict, when lightning has persistent parameter support for add_state() - asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, strict=False) + asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model) else: logging.info(f"Using NGC cloud ASR model {args.asr_model}") - # TODO: Remove strict, when lightning has persistent parameter support for add_state() - asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, strict=False) + asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) asr_model.setup_test_data( test_data_config={ 'sample_rate': 16000, diff --git a/examples/nlp/token_classification/punctuation_capitalization.py b/examples/nlp/token_classification/punctuation_capitalization.py --- a/examples/nlp/token_classification/punctuation_capitalization.py +++ b/examples/nlp/token_classification/punctuation_capitalization.py @@ -49,8 +49,7 @@ def main(cfg: DictConfig) -> None: model = PunctuationCapitalizationModel(cfg.model, trainer=trainer) else: logging.info(f'Loading pretrained model {cfg.pretrained_model}') - # TODO: Remove strict, when lightning has persistent parameter support for add_state() - model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model, strict=False) + model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model) data_dir = cfg.model.dataset.get('data_dir', None) if data_dir: # we can also do finetunining of the pretrained model but it will require diff --git a/examples/nlp/token_classification/token_classification.py b/examples/nlp/token_classification/token_classification.py --- 
a/examples/nlp/token_classification/token_classification.py +++ b/examples/nlp/token_classification/token_classification.py @@ -85,8 +85,7 @@ def main(cfg: DictConfig) -> None: model = TokenClassificationModel(cfg.model, trainer=trainer) else: logging.info(f'Loading pretrained model {cfg.pretrained_model}') - # TODO: Remove strict, when lightning has persistent parameter support for add_state() - model = TokenClassificationModel.from_pretrained(cfg.pretrained_model, strict=False) + model = TokenClassificationModel.from_pretrained(cfg.pretrained_model) data_dir = cfg.model.dataset.get('data_dir', None) if data_dir: diff --git a/nemo/collections/asr/metrics/wer.py b/nemo/collections/asr/metrics/wer.py --- a/nemo/collections/asr/metrics/wer.py +++ b/nemo/collections/asr/metrics/wer.py @@ -111,8 +111,8 @@ def __init__( self.ctc_decode = ctc_decode self.log_prediction = log_prediction - self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum') - self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum') + self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False) + self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False) def ctc_decoder_predictions_tensor(self, predictions: torch.Tensor) -> List[str]: """ diff --git a/nemo/collections/asr/metrics/wer_bpe.py b/nemo/collections/asr/metrics/wer_bpe.py --- a/nemo/collections/asr/metrics/wer_bpe.py +++ b/nemo/collections/asr/metrics/wer_bpe.py @@ -74,8 +74,8 @@ def __init__( self.ctc_decode = ctc_decode self.log_prediction = log_prediction - self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum') - self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum') + self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False) + self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False) def ctc_decoder_predictions_tensor(self, predictions: torch.Tensor) -> 
List[str]: """ diff --git a/nemo/collections/common/metrics/classification_accuracy.py b/nemo/collections/common/metrics/classification_accuracy.py --- a/nemo/collections/common/metrics/classification_accuracy.py +++ b/nemo/collections/common/metrics/classification_accuracy.py @@ -63,8 +63,10 @@ def __init__(self, top_k=None, dist_sync_on_step=False): top_k = [1] self.top_k = top_k - self.add_state("correct_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum') - self.add_state("total_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum') + self.add_state( + "correct_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum', persistent=False + ) + self.add_state("total_counts_k", default=torch.zeros(len(self.top_k)), dist_reduce_fx='sum', persistent=False) def update(self, logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: with torch.no_grad(): diff --git a/nemo/collections/nlp/metrics/classification_report.py b/nemo/collections/nlp/metrics/classification_report.py --- a/nemo/collections/nlp/metrics/classification_report.py +++ b/nemo/collections/nlp/metrics/classification_report.py @@ -76,10 +76,12 @@ def __init__( self.ids_to_labels = None self.mode = mode - self.add_state("tp", default=torch.zeros(num_classes), dist_reduce_fx='sum') - self.add_state("fn", default=torch.zeros(num_classes), dist_reduce_fx='sum') - self.add_state("fp", default=torch.zeros(num_classes), dist_reduce_fx='sum') - self.add_state("num_examples_per_class", default=torch.zeros(num_classes), dist_reduce_fx='sum') + self.add_state("tp", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False) + self.add_state("fn", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False) + self.add_state("fp", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False) + self.add_state( + "num_examples_per_class", default=torch.zeros(num_classes), dist_reduce_fx='sum', persistent=False + ) def update(self, 
predictions: torch.Tensor, labels: torch.Tensor): TP = [] diff --git a/nemo/collections/nlp/metrics/perplexity.py b/nemo/collections/nlp/metrics/perplexity.py --- a/nemo/collections/nlp/metrics/perplexity.py +++ b/nemo/collections/nlp/metrics/perplexity.py @@ -29,7 +29,7 @@ class Perplexity(Metric): def __init__(self, dist_sync_on_step=False): super().__init__(dist_sync_on_step=dist_sync_on_step) - self.add_state('perplexity', default=torch.tensor(0), dist_reduce_fx='mean') + self.add_state('perplexity', default=torch.tensor(0), dist_reduce_fx='mean', persistent=False) def update(self, loss: torch.Tensor): self.perplexity = torch.exp(loss) </patch>
diff --git a/examples/tts/test_tts_infer.py b/examples/tts/test_tts_infer.py --- a/examples/tts/test_tts_infer.py +++ b/examples/tts/test_tts_infer.py @@ -75,8 +75,7 @@ def main(): logging.set_verbosity(logging.DEBUG) logging.info(f"Using NGC cloud ASR model {args.asr_model}") - # TODO: Remove strict, when lightning has persistent parameter support for add_state() - asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, strict=False) + asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model) logging.info(f"Using NGC cloud TTS Spectrogram Generator model {args.tts_model_spec}") tts_model_spec = SpectrogramGenerator.from_pretrained(model_name=args.tts_model_spec) logging.info(f"Using NGC cloud TTS Vocoder model {args.tts_model_vocoder}")
1.0
slackapi__python-slack-events-api-76
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lack of X-Slack-Request-Timestamp and/or X-Slack-Signature in requests results in 500 ### Description I noticed that a test I wrote to simulate a user probing the events URL returned a 500 when I would have expected a 400. ### What type of issue is this? (place an `x` in one of the `[ ]`) - [x] bug - [ ] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. -- _However, Spotify, my employer, has not yet signed the CLA to my knowledge, so I will not be able to submit a patch until they do._ * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue. ### Bug Report Filling out the following details about bugs will help us solve your issue sooner. #### Reproducible in: slackeventsapi version: 2.2.0 python version: 3.6 OS version(s): irrelevant, but Linux and MacOS #### Steps to reproduce: 1. Make a POST request to a slack events URL without `X-Slack-Events-Timestamp` in the headers. 2. Observe response of 500 and subsequent TypeError raised #### Expected result: A 400 error. #### Actual result: 500 and unhandled exception. #### Attachments: Lack of X-Slack-Request-Timestamp and/or X-Slack-Signature in requests results in 500 ### Description I noticed that a test I wrote to simulate a user probing the events URL returned a 500 when I would have expected a 400. ### What type of issue is this? 
(place an `x` in one of the `[ ]`) - [x] bug - [ ] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. -- _However, Spotify, my employer, has not yet signed the CLA to my knowledge, so I will not be able to submit a patch until they do._ * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue. ### Bug Report Filling out the following details about bugs will help us solve your issue sooner. #### Reproducible in: slackeventsapi version: 2.2.0 python version: 3.6 OS version(s): irrelevant, but Linux and MacOS #### Steps to reproduce: 1. Make a POST request to a slack events URL without `X-Slack-Events-Timestamp` in the headers. 2. Observe response of 500 and subsequent TypeError raised #### Expected result: A 400 error. #### Actual result: 500 and unhandled exception. #### Attachments: </issue> <code> [start of README.rst] 1 Slack Events API adapter for Python 2 =================================== 3 4 .. image:: https://badge.fury.io/py/slackeventsapi.svg 5 :target: https://pypi.org/project/slackeventsapi/ 6 .. image:: https://travis-ci.org/slackapi/python-slack-events-api.svg?branch=master 7 :target: https://travis-ci.org/slackapi/python-slack-events-api 8 .. image:: https://codecov.io/gh/slackapi/python-slack-events-api/branch/master/graph/badge.svg 9 :target: https://codecov.io/gh/slackapi/python-slack-events-api 10 11 12 The Slack Events Adapter is a Python-based solution to receive and parse events 13 from Slack’s Events API. This library uses an event emitter framework to allow 14 you to easily process Slack events by simply attaching functions 15 to event listeners. 
16 17 This adapter enhances and simplifies Slack's Events API by incorporating useful best practices, patterns, and opportunities to abstract out common tasks. 18 19 πŸ’‘ We wrote a `blog post which explains how`_ the Events API can help you, why we built these tools, and how you can use them to build production-ready Slack apps. 20 21 .. _blog post which explains how: https://medium.com/@SlackAPI/enhancing-slacks-events-api-7535827829ab 22 23 24 πŸ€– Installation 25 ---------------- 26 27 .. code:: shell 28 29 pip install slackeventsapi 30 31 πŸ€– App Setup 32 -------------------- 33 34 Before you can use the `Events API`_ you must 35 `create a Slack App`_, and turn on 36 `Event Subscriptions`_. 37 38 πŸ’‘ When you add the Request URL to your app's Event Subscription settings, 39 Slack will send a request containing a `challenge` code to verify that your 40 server is alive. This package handles that URL Verification event for you, so 41 all you need to do is start the example app, start ngrok and configure your 42 URL accordingly. 43 44 βœ… Once you have your `Request URL` verified, your app is ready to start 45 receiving Team Events. 46 47 πŸ”‘ Your server will begin receiving Events from Slack's Events API as soon as a 48 user has authorized your app. 49 50 πŸ€– Development workflow: 51 =========================== 52 53 (1) Create a Slack app on https://api.slack.com/apps 54 (2) Add a `bot user` for your app 55 (3) Start the example app on your **Request URL** endpoint 56 (4) Start ngrok and copy the **HTTPS** URL 57 (5) Add your **Request URL** and subscribe your app to events 58 (6) Go to your ngrok URL (e.g. https://myapp12.ngrok.com/) and auth your app 59 60 **πŸŽ‰ Once your app has been authorized, you will begin receiving Slack Events** 61 62 ⚠️ Ngrok is a great tool for developing Slack apps, but we don't recommend using ngrok 63 for production apps. 
64 65 πŸ€– Usage 66 ---------- 67 **⚠️ Keep your app's credentials safe!** 68 69 - For development, keep them in virtualenv variables. 70 71 - For production, use a secure data store. 72 73 - Never post your app's credentials to github. 74 75 .. code:: python 76 77 SLACK_SIGNING_SECRET = os.environ["SLACK_SIGNING_SECRET"] 78 79 Create a Slack Event Adapter for receiving actions via the Events API 80 ----------------------------------------------------------------------- 81 **Using the built-in Flask server:** 82 83 .. code:: python 84 85 from slackeventsapi import SlackEventAdapter 86 87 88 slack_events_adapter = SlackEventAdapter(SLACK_SIGNING_SECRET, endpoint="/slack/events") 89 90 91 # Create an event listener for "reaction_added" events and print the emoji name 92 @slack_events_adapter.on("reaction_added") 93 def reaction_added(event_data): 94 emoji = event_data["event"]["reaction"] 95 print(emoji) 96 97 98 # Start the server on port 3000 99 slack_events_adapter.start(port=3000) 100 101 102 **Using your existing Flask instance:** 103 104 105 .. code:: python 106 107 from flask import Flask 108 from slackeventsapi import SlackEventAdapter 109 110 111 # This `app` represents your existing Flask app 112 app = Flask(__name__) 113 114 115 # An example of one of your Flask app's routes 116 @app.route("/") 117 def hello(): 118 return "Hello there!" 119 120 121 # Bind the Events API route to your existing Flask app by passing the server 122 # instance as the last param, or with `server=app`. 
123 slack_events_adapter = SlackEventAdapter(SLACK_SIGNING_SECRET, "/slack/events", app) 124 125 126 # Create an event listener for "reaction_added" events and print the emoji name 127 @slack_events_adapter.on("reaction_added") 128 def reaction_added(event_data): 129 emoji = event_data["event"]["reaction"] 130 print(emoji) 131 132 133 # Start the server on port 3000 134 if __name__ == "__main__": 135 app.run(port=3000) 136 137 For a comprehensive list of available Slack `Events` and more information on 138 `Scopes`, see https://api.slack.com/events-api 139 140 πŸ€– Example event listeners 141 ----------------------------- 142 143 See `example.py`_ for usage examples. This example also utilizes the 144 SlackClient Web API client. 145 146 .. _example.py: /example/ 147 148 πŸ€” Support 149 ----------- 150 151 Need help? Join `Slack Community`_ and talk to us in `#slack-api`_. 152 153 You can also `create an Issue`_ right here on GitHub. 154 155 .. _Events API: https://api.slack.com/events-api 156 .. _create a Slack App: https://api.slack.com/apps/new 157 .. _Event Subscriptions: https://api.slack.com/events-api#subscriptions 158 .. _Slack Community: http://slackcommunity.com/ 159 .. _#slack-api: https://dev4slack.slack.com/messages/slack-api/ 160 .. 
_create an Issue: https://github.com/slackapi/python-slack-events-api/issues/new 161 [end of README.rst] [start of slackeventsapi/server.py] 1 import hashlib 2 import hmac 3 import json 4 import platform 5 import sys 6 from time import time 7 8 from flask import Flask, request, make_response, Blueprint 9 from werkzeug.local import LocalProxy 10 11 from .version import __version__ 12 13 14 class SlackServer(Flask): 15 def __init__(self, signing_secret, endpoint, emitter, server): 16 self.signing_secret = signing_secret 17 self.emitter = emitter 18 self.endpoint = endpoint 19 self.package_info = self.get_package_info() 20 21 # If a server is passed in, bind the event handler routes to it, 22 # otherwise create a new Flask instance. 23 if server: 24 if isinstance(server, (Flask, Blueprint, LocalProxy)): 25 self.bind_route(server) 26 else: 27 raise TypeError("Server must be an instance of Flask, Blueprint, or LocalProxy") 28 else: 29 Flask.__init__(self, __name__) 30 self.bind_route(self) 31 32 def get_package_info(self): 33 client_name = __name__.split('.')[0] 34 client_version = __version__ # Version is returned from version.py 35 36 # Collect the package info, Python version and OS version. 
37 package_info = { 38 "client": "{0}/{1}".format(client_name, client_version), 39 "python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info), 40 "system": "{0}/{1}".format(platform.system(), platform.release()) 41 } 42 43 # Concatenate and format the user-agent string to be passed into request headers 44 ua_string = [] 45 for key, val in package_info.items(): 46 ua_string.append(val) 47 48 return " ".join(ua_string) 49 50 def verify_signature(self, timestamp, signature): 51 # Verify the request signature of the request sent from Slack 52 # Generate a new hash using the app's signing secret and request data 53 54 # Compare the generated hash and incoming request signature 55 # Python 2.7.6 doesn't support compare_digest 56 # It's recommended to use Python 2.7.7+ 57 # noqa See https://docs.python.org/2/whatsnew/2.7.html#pep-466-network-security-enhancements-for-python-2-7 58 req = str.encode('v0:' + str(timestamp) + ':') + request.get_data() 59 request_hash = 'v0=' + hmac.new( 60 str.encode(self.signing_secret), 61 req, hashlib.sha256 62 ).hexdigest() 63 64 if hasattr(hmac, "compare_digest"): 65 # Compare byte strings for Python 2 66 if (sys.version_info[0] == 2): 67 return hmac.compare_digest(bytes(request_hash), bytes(signature)) 68 else: 69 return hmac.compare_digest(request_hash, signature) 70 else: 71 if len(request_hash) != len(signature): 72 return False 73 result = 0 74 if isinstance(request_hash, bytes) and isinstance(signature, bytes): 75 for x, y in zip(request_hash, signature): 76 result |= x ^ y 77 else: 78 for x, y in zip(request_hash, signature): 79 result |= ord(x) ^ ord(y) 80 return result == 0 81 82 def bind_route(self, server): 83 @server.route(self.endpoint, methods=['GET', 'POST']) 84 def event(): 85 # If a GET request is made, return 404. 
86 if request.method == 'GET': 87 return make_response("These are not the slackbots you're looking for.", 404) 88 89 # Each request comes with request timestamp and request signature 90 # emit an error if the timestamp is out of range 91 req_timestamp = request.headers.get('X-Slack-Request-Timestamp') 92 if abs(time() - int(req_timestamp)) > 60 * 5: 93 slack_exception = SlackEventAdapterException('Invalid request timestamp') 94 self.emitter.emit('error', slack_exception) 95 return make_response("", 403) 96 97 # Verify the request signature using the app's signing secret 98 # emit an error if the signature can't be verified 99 req_signature = request.headers.get('X-Slack-Signature') 100 if not self.verify_signature(req_timestamp, req_signature): 101 slack_exception = SlackEventAdapterException('Invalid request signature') 102 self.emitter.emit('error', slack_exception) 103 return make_response("", 403) 104 105 # Parse the request payload into JSON 106 event_data = json.loads(request.data.decode('utf-8')) 107 108 # Echo the URL verification challenge code back to Slack 109 if "challenge" in event_data: 110 return make_response( 111 event_data.get("challenge"), 200, {"content_type": "application/json"} 112 ) 113 114 # Parse the Event payload and emit the event to the event listener 115 if "event" in event_data: 116 event_type = event_data["event"]["type"] 117 self.emitter.emit(event_type, event_data) 118 response = make_response("", 200) 119 response.headers['X-Slack-Powered-By'] = self.package_info 120 return response 121 122 123 class SlackEventAdapterException(Exception): 124 """ 125 Base exception for all errors raised by the SlackClient library 126 """ 127 128 def __init__(self, msg=None): 129 if msg is None: 130 # default error message 131 msg = "An error occurred in the SlackEventsApiAdapter library" 132 super(SlackEventAdapterException, self).__init__(msg) 133 [end of slackeventsapi/server.py] </code> Here is an example of a patch file. 
It consists of changes to the code base. It specifies the file names, the line numbers of each change, and the removed and added lines. A single patch file can contain changes to multiple files. <patch> --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + points.append((x, y)) return points </patch> I need you to solve the provided issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the format shown above. Respond below:
slackapi/python-slack-events-api
2d298a9b90b5b4a5599d120bd0acb45a1b77fa4b
Lack of X-Slack-Request-Timestamp and/or X-Slack-Signature in requests results in 500 ### Description I noticed that a test I wrote to simulate a user probing the events URL returned a 500 when I would have expected a 400. ### What type of issue is this? (place an `x` in one of the `[ ]`) - [x] bug - [ ] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. -- _However, Spotify, my employer, has not yet signed the CLA to my knowledge, so I will not be able to submit a patch until they do._ * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue. ### Bug Report Filling out the following details about bugs will help us solve your issue sooner. #### Reproducible in: slackeventsapi version: 2.2.0 python version: 3.6 OS version(s): irrelevant, but Linux and MacOS #### Steps to reproduce: 1. Make a POST request to a slack events URL without `X-Slack-Events-Timestamp` in the headers. 2. Observe response of 500 and subsequent TypeError raised #### Expected result: A 400 error. #### Actual result: 500 and unhandled exception. #### Attachments: Lack of X-Slack-Request-Timestamp and/or X-Slack-Signature in requests results in 500 ### Description I noticed that a test I wrote to simulate a user probing the events URL returned a 500 when I would have expected a 400. ### What type of issue is this? 
(place an `x` in one of the `[ ]`) - [x] bug - [ ] enhancement (feature request) - [ ] question - [ ] documentation related - [ ] testing related - [ ] discussion ### Requirements * [x] I've read and understood the [Contributing guidelines](https://github.com/slackapi/python-slack-events-api/blob/master/.github/contributing.md) and have done my best effort to follow them. -- _However, Spotify, my employer, has not yet signed the CLA to my knowledge, so I will not be able to submit a patch until they do._ * [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct). * [x] I've searched for any related issues and avoided creating a duplicate issue. ### Bug Report Filling out the following details about bugs will help us solve your issue sooner. #### Reproducible in: slackeventsapi version: 2.2.0 python version: 3.6 OS version(s): irrelevant, but Linux and MacOS #### Steps to reproduce: 1. Make a POST request to a slack events URL without `X-Slack-Events-Timestamp` in the headers. 2. Observe response of 500 and subsequent TypeError raised #### Expected result: A 400 error. #### Actual result: 500 and unhandled exception. #### Attachments:
Apologies for submitting without the form filled out, I have completed it now. The fix is quite simple, but I will need to get Spotify to sign the CLA (or confirm that they already have) to fix it myself. This also affects lack of Signature, though that may want to be a separate issue as it is an entirely different part of the code that assumes this will be set. :) Going deeper, it seems like the only thing this library does on errors is raise exceptions, despite the few places it appears to want to send a 403. Perhaps I missed a doc? Apologies for submitting without the form filled out, I have completed it now. The fix is quite simple, but I will need to get Spotify to sign the CLA (or confirm that they already have) to fix it myself. This also affects lack of Signature, though that may want to be a separate issue as it is an entirely different part of the code that assumes this will be set. :) Going deeper, it seems like the only thing this library does on errors is raise exceptions, despite the few places it appears to want to send a 403. Perhaps I missed a doc?
2020-07-24T02:34:34Z
<patch> diff --git a/slackeventsapi/server.py b/slackeventsapi/server.py --- a/slackeventsapi/server.py +++ b/slackeventsapi/server.py @@ -89,7 +89,7 @@ def event(): # Each request comes with request timestamp and request signature # emit an error if the timestamp is out of range req_timestamp = request.headers.get('X-Slack-Request-Timestamp') - if abs(time() - int(req_timestamp)) > 60 * 5: + if req_timestamp is None or abs(time() - int(req_timestamp)) > 60 * 5: slack_exception = SlackEventAdapterException('Invalid request timestamp') self.emitter.emit('error', slack_exception) return make_response("", 403) @@ -97,7 +97,7 @@ def event(): # Verify the request signature using the app's signing secret # emit an error if the signature can't be verified req_signature = request.headers.get('X-Slack-Signature') - if not self.verify_signature(req_timestamp, req_signature): + if req_signature is None or not self.verify_signature(req_timestamp, req_signature): slack_exception = SlackEventAdapterException('Invalid request signature') self.emitter.emit('error', slack_exception) return make_response("", 403) </patch>
diff --git a/tests/test_server.py b/tests/test_server.py --- a/tests/test_server.py +++ b/tests/test_server.py @@ -56,6 +56,32 @@ def test_url_challenge(client): assert bytes.decode(res.data) == "valid_challenge_token" +def test_no_request_timestamp_header(client): + data = pytest.reaction_event_fixture + with pytest.raises(SlackEventAdapterException) as excinfo: + res = client.post( + '/slack/events', + data=data, + content_type='application/json', + headers={} + ) + assert str(excinfo.value) == 'Invalid request timestamp' + +def test_no_request_signature_header(client): + data = pytest.reaction_event_fixture + timestamp = int(time.time()) + with pytest.raises(SlackEventAdapterException) as excinfo: + res = client.post( + '/slack/events', + data=data, + content_type='application/json', + headers={ + 'X-Slack-Request-Timestamp': timestamp, # valid + } + ) + assert str(excinfo.value) == 'Invalid request signature' + + def test_invalid_request_signature(client): # Verify [package metadata header is set slack_adapter = SlackEventAdapter("SIGNING_SECRET")
1.0