text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def _GetCodegenFromFlags(args):
    """Build a DescriptorGenerator from parsed command-line flags.

    Loads the discovery document, resolves OAuth client credentials
    (from --client_json or individual flags), prepares the output
    directory, and returns a configured generator.

    Args:
      args: Parsed argparse namespace carrying the codegen flags.

    Returns:
      A gen_client_lib.DescriptorGenerator ready to emit code.

    Raises:
      exceptions.NotFoundError: if --client_json cannot be opened.
      exceptions.ConfigurationValueError: if the output directory
        already exists and --overwrite was not passed.
    """
    discovery_doc = _GetDiscoveryDocFromFlags(args)
    names = util.Names(args.strip_prefix,
                       args.experimental_name_convention,
                       args.experimental_capitalize_enums)

    if args.client_json:
        try:
            with io.open(args.client_json, encoding='utf8') as client_json:
                f = json.loads(util.ReplaceHomoglyphs(client_json.read()))
                # Credentials may live under either 'installed' or 'web'.
                web = f.get('installed', f.get('web', {}))
                client_id = web.get('client_id')
                client_secret = web.get('client_secret')
        except IOError:
            raise exceptions.NotFoundError(
                'Failed to open client json file: %s' % args.client_json)
    else:
        client_id = args.client_id
        client_secret = args.client_secret

    if not client_id:
        logging.warning('No client ID supplied')
        client_id = ''

    if not client_secret:
        logging.warning('No client secret supplied')
        client_secret = ''

    client_info = util.ClientInfo.Create(
        discovery_doc, args.scope, client_id, client_secret,
        args.user_agent, names, args.api_key)
    outdir = os.path.expanduser(args.outdir) or client_info.default_directory
    if os.path.exists(outdir) and not args.overwrite:
        raise exceptions.ConfigurationValueError(
            'Output directory exists, pass --overwrite to replace '
            'the existing files.')
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    return gen_client_lib.DescriptorGenerator(
        discovery_doc, client_info, names, args.root_package, outdir,
        base_package=args.base_package,
        protorpc_package=args.protorpc_package,
        init_wildcards_file=(args.init_file == 'wildcards'),
        use_proto2=args.experimental_proto2_output,
        unelidable_request_methods=args.unelidable_request_methods,
        apitools_version=args.apitools_version)
def GenerateClient(args):
    """Drive client code generation from parsed flags.

    Args:
      args: Parsed argparse namespace.

    Returns:
      128 if a codegen object could not be constructed, else None.
    """
    codegen = _GetCodegenFromFlags(args)
    if codegen is None:
        logging.error('Failed to create codegen, exiting.')
        return 128
    _WriteGeneratedFiles(args, codegen)
    # 'none' suppresses __init__ generation entirely.
    if args.init_file != 'none':
        _WriteInit(codegen)
def GeneratePipPackage(args):
    """Generate a client as a pip-installable tarball.

    Redirects args.outdir/root_package so the generated client lands
    under apitools/clients/<api>, then writes package scaffolding
    (setup.py plus the intermediate __init__ files).

    Args:
      args: Parsed argparse namespace.

    Returns:
      1 if a codegen object could not be constructed, else None.
    """
    discovery_doc = _GetDiscoveryDocFromFlags(args)
    package = discovery_doc['name']
    original_outdir = os.path.expanduser(args.outdir)
    args.outdir = os.path.join(
        args.outdir, 'apitools/clients/%s' % package)
    args.root_package = 'apitools.clients.%s' % package
    codegen = _GetCodegenFromFlags(args)
    if codegen is None:
        logging.error('Failed to create codegen, exiting.')
        return 1
    _WriteGeneratedFiles(args, codegen)
    _WriteInit(codegen)
    with util.Chdir(original_outdir):
        _WriteSetupPy(codegen)
        with util.Chdir('apitools'):
            _WriteIntermediateInit(codegen)
            with util.Chdir('clients'):
                _WriteIntermediateInit(codegen)
def read(self, size=None):  # pylint: disable=missing-docstring
    """Read at most ``size`` bytes from this slice.

    Unlike most streams, this raises StreamExhausted when the
    underlying stream returns no bytes while the slice still expects
    more data.

    Args:
      size: Optional cap on the number of bytes to read.

    Returns:
      The bytes read from this slice.

    Raises:
      exceptions.StreamExhausted: if the underlying stream ran dry
        before the slice was satisfied.
    """
    if size is None:
        read_size = self.__remaining_bytes
    else:
        read_size = min(size, self.__remaining_bytes)
    data = self.__stream.read(read_size)
    # An empty read while bytes remain means the stream ended early.
    if read_size > 0 and not data:
        raise exceptions.StreamExhausted(
            'Not enough bytes in stream; expected %d, exhausted '
            'after %d' % (
                self.__max_bytes,
                self.__max_bytes - self.__remaining_bytes))
    self.__remaining_bytes -= len(data)
    return data
def update_md5(filenames):
    """Update our built-in md5 registry.

    Recomputes the md5 of every file in ``filenames``, stores it in the
    global ``md5_data`` table, then rewrites the ``md5_data = {...}``
    literal embedded in this module's own source file.

    Args:
      filenames: Iterable of file paths to (re)hash.

    Fixes over the previous version:
      * Python 2 ``print >>sys.stderr`` statement replaced with the
        py3-compatible ``print(..., file=...)``.
      * The module source was read in binary mode but searched with a
        str regex — a TypeError on Python 3; it is now read as text.
      * File handles are closed deterministically via ``with``.
    """
    import inspect
    import re

    for name in filenames:
        base = os.path.basename(name)
        with open(name, 'rb') as f:
            md5_data[base] = md5(f.read()).hexdigest()

    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    srcfile = inspect.getsourcefile(sys.modules[__name__])
    # Read as text so the regex and slicing below operate on str.
    with open(srcfile) as f:
        src = f.read()

    match = re.search(r"\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print("Internal error!", file=sys.stderr)
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    with open(srcfile, 'w') as f:
        f.write(src)
def Add(self, service, method, request, global_params=None):
    """Queue one API call on this batch.

    Args:
      service: A class inheriting base_api.BaseApiService.
      method: str, name of the desired method on the service.
      request: Input message appropriate for service.method.
      global_params: Optional extra parameters forwarded to
        method.PrepareHttpRequest.

    Returns:
      None
    """
    # Look up method and upload configuration for the call.
    method_config = service.GetMethodConfig(method)
    upload_config = service.GetUploadConfig(method)

    # Build the concrete HTTP request for the call.
    http_request = service.PrepareHttpRequest(
        method_config, request, global_params=global_params,
        upload_config=upload_config)

    # Wrap it and record it on the master list.
    api_request = self.ApiCall(
        http_request, self.retryable_codes, service, method_config)
    self.api_requests.append(api_request)
def Execute(self, http, sleep_between_polls=5, max_retries=5,
            max_batch_size=None, batch_request_callback=None):
    """Execute every queued request, retrying non-terminal ones.

    Args:
      http: httplib2.Http object used to make the requests.
      sleep_between_polls: Seconds to sleep between retry rounds.
      max_retries: Maximum number of rounds; requests still failing
        after that simply report their last response/exception.
      max_batch_size: Optional cap on requests per HTTP batch.
      batch_request_callback: Optional callback(response, exception)
        passed through to BatchHttpRequest.

    Returns:
      List of ApiCalls.
    """
    requests = [request for request in self.api_requests
                if not request.terminal_state]
    batch_size = max_batch_size or len(requests)

    for attempt in range(max_retries):
        if attempt:
            time.sleep(sleep_between_polls)

        for i in range(0, len(requests), batch_size):
            # Build a fresh BatchHttpRequest holding this window of
            # still-incomplete requests.
            batch_http_request = BatchHttpRequest(
                batch_url=self.batch_url,
                callback=batch_request_callback,
                response_encoding=self.response_encoding
            )
            for request in itertools.islice(requests, i, i + batch_size):
                batch_http_request.Add(
                    request.http_request, request.HandleResponse)
            batch_http_request.Execute(http)

            # Refresh credentials once if anything in this window hit
            # an authorization failure.
            if hasattr(http.request, 'credentials'):
                if any(request.authorization_failed
                       for request in itertools.islice(
                           requests, i, i + batch_size)):
                    http.request.credentials.refresh(http)

        # Re-collect requests that are still retryable.
        requests = [request for request in self.api_requests
                    if not request.terminal_state]

        if not requests:
            break

    return self.api_requests
def _ConvertHeaderToId(header):
    """Convert a Content-ID header value back to a request id.

    Presumes the header conforms to the format produced by
    _ConvertIdToHeader(): ``<prefix+url-encoded-id>``.

    Args:
      header: str, the Content-ID header value.

    Returns:
      The extracted, url-unquoted id value.

    Raises:
      exceptions.BatchError: if the header is not in the expected
        format.
    """
    # BUGFIX: the header must BOTH start with '<' AND end with '>'.
    # The previous `or` only rejected headers lacking both brackets,
    # so malformed values such as '<id' slipped through and were then
    # mangled by the [1:-1] slice below.
    if not (header.startswith('<') and header.endswith('>')):
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    if '+' not in header:
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    _, request_id = header[1:-1].rsplit('+', 1)

    return urllib_parse.unquote(request_id)
def _SerializeRequest(self, request):
    """Render a http_wrapper.Request in application/http format.

    Args:
      request: The http_wrapper.Request to serialize.

    Returns:
      The request as a string (status line + MIME headers + body).
    """
    # Build the request line from the path+query of the URL only.
    parsed = urllib_parse.urlsplit(request.url)
    request_line = urllib_parse.urlunsplit(
        ('', '', parsed.path, parsed.query, ''))
    if not isinstance(request_line, six.text_type):
        request_line = request_line.decode('utf-8')
    status_line = u' '.join((
        request.http_method,
        request_line,
        u'HTTP/1.1\n'
    ))
    major, minor = request.headers.get(
        'content-type', 'application/json').split('/')
    msg = mime_nonmultipart.MIMENonMultipart(major, minor)

    # MIMENonMultipart already emits its own Content-Type header, so
    # copy every header EXCEPT content-type from the request.
    for key, value in request.headers.items():
        if key == 'content-type':
            continue
        msg[key] = value

    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
        msg.set_payload(request.body)

    # Flatten the MIME message; maxheaderlen=0 disables header
    # line-wrapping, which would corrupt the serialized request.
    str_io = six.StringIO()
    gen = generator.Generator(str_io, maxheaderlen=0)
    gen.flatten(msg, unixfrom=False)
    body = str_io.getvalue()

    return status_line + body
def _DeserializeResponse(self, payload):
    """Parse a serialized HTTP response string into a Response.

    Args:
      payload: str, status line + headers + body to deserialize.

    Returns:
      An http_wrapper.Response object.
    """
    # Peel off the status line; the remainder is a MIME document.
    status_line, payload = payload.split('\n', 1)
    _, status, _ = status_line.split(' ', 2)

    # Parse headers + body with the stdlib email parser.
    parser = email_parser.Parser()
    msg = parser.parsestr(payload)

    # Headers become the info dict, augmented with the status code.
    info = dict(msg)
    info['status'] = status

    content = msg.get_payload()

    return http_wrapper.Response(info, content, self.__batch_url)
def Add(self, request, callback=None):
    """Register a request (and optional per-request callback).

    Args:
      request: A http_wrapper.Request to add to the batch.
      callback: Optional callable of the form
        ``callback(response, exception)`` — response is the
        deserialized response object; exception is an HttpError if
        one occurred for this request, else None.

    Returns:
      None
    """
    handler = RequestResponseAndHandler(request, None, callback)
    self.__request_response_handlers[self._NewId()] = handler
def _Execute(self, http):
    """Serialize the batch, POST it, and fan results back out.

    Args:
      http: A httplib2.Http object used to make the request.

    Raises:
      httplib2.HttpLib2Error: on transport errors.
      exceptions.HttpError: if the batch POST itself fails.
      exceptions.BatchError: if the response is not multipart/mixed.
    """
    message = mime_multipart.MIMEMultipart('mixed')
    # Suppress the outer message's own headers; only the boundary and
    # parts should be written.
    setattr(message, '_write_headers', lambda self: None)

    # Attach one application/http part per pending request.
    for key in self.__request_response_handlers:
        msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
        msg['Content-Transfer-Encoding'] = 'binary'
        msg['Content-ID'] = self._ConvertIdToHeader(key)

        body = self._SerializeRequest(
            self.__request_response_handlers[key].request)
        msg.set_payload(body)
        message.attach(msg)

    request = http_wrapper.Request(self.__batch_url, 'POST')
    request.body = message.as_string()
    request.headers['content-type'] = (
        'multipart/mixed; boundary="%s"') % message.get_boundary()

    response = http_wrapper.MakeRequest(http, request)

    if response.status_code >= 300:
        raise exceptions.HttpError.FromResponse(response)

    # Prepend a content-type header so the email Parser can handle it.
    header = 'content-type: %s\r\n\r\n' % response.info['content-type']

    content = response.content
    if isinstance(content, bytes) and self.__response_encoding:
        content = response.content.decode(self.__response_encoding)

    parser = email_parser.Parser()
    mime_response = parser.parsestr(header + content)

    if not mime_response.is_multipart():
        raise exceptions.BatchError(
            'Response not in multipart/mixed format.')

    # Route each part's response back to its originating handler.
    for part in mime_response.get_payload():
        request_id = self._ConvertHeaderToId(part['Content-ID'])
        response = self._DeserializeResponse(part.get_payload())

        # namedtuple._replace(...) is public API despite the
        # underscore; silence the lint warning.
        # pylint: disable=protected-access
        self.__request_response_handlers[request_id] = (
            self.__request_response_handlers[request_id]._replace(
                response=response))
def Execute(self, http):
    """Execute all queued requests as one batched HTTP request.

    Args:
      http: A httplib2.Http object to be used with the request.

    Returns:
      None

    Raises:
      BatchError: if the response is the wrong format.
    """
    self._Execute(http)

    # Dispatch each response to its per-request handler and to the
    # batch-level callback, surfacing HTTP errors as exceptions.
    for key in self.__request_response_handlers:
        response = self.__request_response_handlers[key].response
        callback = self.__request_response_handlers[key].handler

        exception = None

        if response.status_code >= 300:
            exception = exceptions.HttpError.FromResponse(response)

        if callback is not None:
            callback(response, exception)
        if self.__callback is not None:
            self.__callback(response, exception)
def find_definition(name, relative_to=None, importer=__import__):
    """Find a Message or Enum definition by name in module-space.

    Looks for ``name`` relative to ``relative_to`` (a Message class or
    a module), walking outward one container at a time until the
    definition is found or the top level is exhausted. A name starting
    with '.' is treated as fully qualified and searched from the top
    only.

    Args:
      name: Name of definition to find; fully qualified or relative.
      relative_to: Message class or module to search from; None forces
        a fully qualified search.
      importer: Import function used to resolve modules.

    Returns:
      The Enum or Message class associated with the name.

    Raises:
      TypeError: if relative_to is not None, a module, or a Message.
      DefinitionNotFoundError: if no definition is found anywhere on
        the search path.
    """
    # Validate the anchor of the search.
    if not (relative_to is None or
            isinstance(relative_to, types.ModuleType) or
            isinstance(relative_to, type) and
            issubclass(relative_to, Message)):
        raise TypeError(
            'relative_to must be None, Message definition or module.'
            ' Found: %s' % relative_to)

    name_path = name.split('.')

    # A leading '.' means an absolute reference: search from the top.
    if not name_path[0]:
        relative_to = None
        name_path = name_path[1:]

    def search_path():
        """One search pass: follow name_path down from relative_to.

        Returns:
          Message or Enum at the end of name_path, else None.
        """
        next_part = relative_to
        for node in name_path:
            # Prefer attributes (nested definitions) over sub-modules.
            attribute = getattr(next_part, node, None)

            if attribute is not None:
                next_part = attribute
            else:
                # Only modules (or the top level) can supply
                # sub-modules to continue the walk.
                if (next_part is None or
                        isinstance(next_part, types.ModuleType)):
                    if next_part is None:
                        module_name = node
                    else:
                        module_name = '%s.%s' % (next_part.__name__, node)

                    try:
                        fromitem = module_name.split('.')[-1]
                        next_part = importer(module_name, '', '',
                                             [str(fromitem)])
                    except ImportError:
                        return None
                else:
                    return None

            # Intermediate non-module nodes must themselves be
            # Message/Enum definitions, or the path is invalid.
            if not isinstance(next_part, types.ModuleType):
                if not (isinstance(next_part, type) and
                        issubclass(next_part, (Message, Enum))):
                    return None

        return next_part

    while True:
        found = search_path()
        if isinstance(found, type) and issubclass(found, (Enum, Message)):
            return found
        else:
            # Nothing at this level; pop one container and retry.
            if relative_to is None:
                # Fully qualified search already failed.
                raise DefinitionNotFoundError(
                    'Could not find definition for %s' % name)
            else:
                if isinstance(relative_to, types.ModuleType):
                    # Climb to the parent module (or the top level).
                    module_path = relative_to.__name__.split('.')[:-1]
                    if not module_path:
                        relative_to = None
                    else:
                        # Should not raise ImportError; if it does,
                        # something is genuinely broken — propagate.
                        relative_to = importer(
                            '.'.join(module_path), '', '',
                            [module_path[-1]])
                elif (isinstance(relative_to, type) and
                      issubclass(relative_to, Message)):
                    # Climb from nested Message to its container, or
                    # to its defining module if it is top-level.
                    parent = relative_to.message_definition()
                    if parent is None:
                        last_module_name = relative_to.__module__.split(
                            '.')[-1]
                        relative_to = importer(
                            relative_to.__module__, '', '',
                            [last_module_name])
                    else:
                        relative_to = parent
def definition_name(cls):
    """Return the dot-separated fully qualified name of the definition.

    Combines the outer definition name (package or enclosing message)
    with this class's own name.

    Returns:
      Fully qualified definition name as text.
    """
    outer_definition_name = cls.outer_definition_name()
    if outer_definition_name is None:
        return six.text_type(cls.__name__)

    return u'%s.%s' % (outer_definition_name, cls.__name__)
def outer_definition_name(cls):
    """Name of the container of this definition.

    Returns:
      The enclosing definition's name if nested, otherwise the
      package name of the defining module.
    """
    outer_definition = cls.message_definition()
    if not outer_definition:
        return util.get_package_for_module(cls.__module__)

    return outer_definition.definition_name()
def definition_package(cls):
    """Name of the package this definition belongs to.

    Delegates upward through nested definitions until the defining
    module's package is reached.

    Returns:
      Package name as a string.
    """
    outer_definition = cls.message_definition()
    if not outer_definition:
        return util.get_package_for_module(cls.__module__)

    return outer_definition.definition_package()
def to_dict(cls):
    """Map each enum member's name to its number.

    The resulting dictionary can be used with def_num.

    Returns:
      A dict of {name: number}.
    """
    return {member.name: member.number for member in cls}
def check_initialized(self):
    """Verify that every required field of this message is set.

    Recurses into nested message fields (repeated or scalar).

    Raises:
      ValidationError: if a required field is missing, or a nested
        message fails its own initialization check. The error is
        tagged with the outermost message name that detected it.
    """
    for name, field in self.__by_name.items():
        value = getattr(self, name)
        if value is None:
            if field.required:
                raise ValidationError(
                    "Message %s is missing required field %s" %
                    (type(self).__name__, name))
        else:
            try:
                if (isinstance(field, MessageField) and
                        issubclass(field.message_type, Message)):
                    if field.repeated:
                        for item in value:
                            item_message_value = field.value_to_message(
                                item)
                            item_message_value.check_initialized()
                    else:
                        message_value = field.value_to_message(value)
                        message_value.check_initialized()
            except ValidationError as err:
                # Tag the error once, with the outermost message name.
                if not hasattr(err, 'message_name'):
                    err.message_name = type(self).__name__
                raise
def get_assigned_value(self, name):
    """Return the explicitly assigned value of an attribute.

    Unlike normal attribute access, this does NOT fall back to the
    field's default when the value was never set.

    Args:
      name: Name of attribute to get.

    Returns:
      The assigned value, or None if it has not been set.

    Raises:
      AttributeError: if the message has no field named ``name``.
    """
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        raise AttributeError('Message %s has no field %s' % (
            message_type.__name__, name))
    return self.__tags.get(field.number)
def reset(self, name):
    """Reset a field back to its default (unset) state.

    Repeated fields become an empty FieldList; scalar fields are
    removed from the tag map entirely.

    Args:
      name: Name of field to reset.

    Raises:
      AttributeError: if the message has no field named ``name``.
    """
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        if name not in message_type.__by_name:
            raise AttributeError('Message %s has no field %s' % (
                message_type.__name__, name))

    if field.repeated:
        self.__tags[field.number] = FieldList(field, [])
    else:
        self.__tags.pop(field.number, None)
def get_unrecognized_field_info(self, key, value_default=None,
                                variant_default=None):
    """Look up the value and variant of an unknown field.

    Args:
      key: The name or number of the field to retrieve.
      value_default: Value returned when the key is absent.
      variant_default: Variant returned when the key is absent.

    Returns:
      (value, variant) as stored by set_unrecognized_field, or the
      provided defaults when the key is unknown.
    """
    return self.__unrecognized_fields.get(
        key, (value_default, variant_default))
def set_unrecognized_field(self, key, value, variant):
    """Record an unrecognized field encountered while decoding.

    Args:
      key: The name or number used to refer to this unknown value.
      value: The value of the field.
      variant: Type information needed to interpret or re-encode it.

    Raises:
      TypeError: if ``variant`` is not a messages.Variant instance.
    """
    if not isinstance(variant, Variant):
        raise TypeError('Variant type %s is not valid.' % variant)
    self.__unrecognized_fields[key] = value, variant
def append(self, value):
    """Validate ``value`` against the owning field, then append it."""
    self.__field.validate_element(value)
    return list.append(self, value)
def extend(self, sequence):
    """Validate ``sequence`` against the owning field, then extend."""
    self.__field.validate(sequence)
    return list.extend(self, sequence)
def insert(self, index, value):
    """Validate ``value`` against the owning field, then insert it."""
    self.__field.validate_element(value)
    return list.insert(self, index, value)
def validate_element(self, value):
    """Validate a single element of the field.

    Differs from ``validate`` in that it checks individual values of
    repeated fields.

    Args:
      value: Value to validate.

    Returns:
      The value cast to the expected type (ints are promoted to
      float for float fields).

    Raises:
      ValidationError: if value is not of the expected type, or a
        required value is missing.
    """
    if not isinstance(value, self.type):
        # Authorize int values as float.
        if isinstance(value, six.integer_types) and self.type == float:
            return float(value)

        if value is None:
            if self.required:
                raise ValidationError('Required field is missing')
        else:
            try:
                name = self.name
            except AttributeError:
                # Field not yet bound to a message; report by class.
                raise ValidationError('Expected type %s for %s, '
                                      'found %s (type %s)' %
                                      (self.type, self.__class__.__name__,
                                       value, type(value)))
            else:
                raise ValidationError(
                    'Expected type %s for field %s, found %s (type %s)' %
                    (self.type, name, value, type(value)))
    return value
def __validate(self, value, validate_element):
    """Validate a value — scalar or repeated — with an element validator.

    Args:
      value: Value to validate.
      validate_element: Callable used to validate a single element.

    Returns:
      The validated (possibly element-coerced) value.

    Raises:
      ValidationError: if a repeated value is not a list/tuple, or
        contains None elements.
    """
    if not self.repeated:
        return validate_element(value)
    else:
        # Repeated values must be a list or tuple (never a string).
        if isinstance(value, (list, tuple)):
            result = []
            for element in value:
                if element is None:
                    try:
                        name = self.name
                    except AttributeError:
                        raise ValidationError(
                            'Repeated values for %s '
                            'may not be None' % self.__class__.__name__)
                    else:
                        raise ValidationError(
                            'Repeated values for field %s '
                            'may not be None' % name)
                result.append(validate_element(element))
            return result
        elif value is not None:
            try:
                name = self.name
            except AttributeError:
                raise ValidationError('%s is repeated. Found: %s' % (
                    self.__class__.__name__, value))
            else:
                raise ValidationError(
                    'Field %s is repeated. Found: %s' % (name, value))
    return value
def validate_element(self, value):
    """Validate a string element, accepting both text and UTF-8 bytes.

    Bytes values are accepted only if they decode as UTF-8; text
    values are delegated to the base Field validation.

    Raises:
      ValidationError: if a bytes value is not valid UTF-8.
    """
    # A bytes value satisfies "required=True" if it is valid UTF-8.
    if isinstance(value, bytes):
        try:
            six.text_type(value, 'UTF-8')
        except UnicodeDecodeError as err:
            try:
                _ = self.name
            except AttributeError:
                validation_error = ValidationError(
                    'Field encountered non-UTF-8 string %r: %s' % (value,
                                                                   err))
            else:
                validation_error = ValidationError(
                    'Field %s encountered non-UTF-8 string %r: %s' % (
                        self.name, value, err))
                validation_error.field_name = self.name
            raise validation_error
    else:
        return super(StringField, self).validate_element(value)
    return value
def type(self):
    """Message type used for this field, resolving lazy references.

    On first access, resolves the stored type name into an actual
    Message subclass and caches it.

    Raises:
      FieldDefinitionError: if the resolved name is not a Message
        subclass (the Message base itself is rejected).
    """
    if self.__type is None:
        message_type = find_definition(
            self.__type_name, self.message_definition())
        if not (message_type is not Message and
                isinstance(message_type, type) and
                issubclass(message_type, Message)):
            raise FieldDefinitionError(
                'Invalid message class: %s' % message_type)
        self.__type = message_type
    return self.__type
def value_from_message(self, message):
    """Convert an underlying message to the user-facing value.

    Args:
      message: A message instance of type self.message_type.

    Returns:
      The message itself (identity conversion).

    Raises:
      DecodeError: if ``message`` is not a self.message_type instance.
    """
    if not isinstance(message, self.message_type):
        raise DecodeError('Expected type %s, got %s: %r' %
                          (self.message_type.__name__,
                           type(message).__name__,
                           message))
    return message
def value_to_message(self, value):
    """Convert a user-facing value to its underlying message.

    Args:
      value: A value of type self.type.

    Returns:
      The value itself (identity conversion).

    Raises:
      EncodeError: if ``value`` is not a self.type instance.
    """
    if not isinstance(value, self.type):
        raise EncodeError('Expected type %s, got %s: %r' %
                          (self.type.__name__,
                           type(value).__name__,
                           value))
    return value
def validate_default_element(self, value):
    """Validate the default for an Enum field, allowing lazy values.

    A str or int default is accepted as-is — it is only validated
    against the Enum type once that type has been resolved; anything
    else is delegated to the base Field implementation.

    Args:
      value: Value to validate.

    Raises:
      ValidationError: if value is not an expected type.
    """
    if isinstance(value, (six.string_types, six.integer_types)):
        # Delayed-resolution types cannot validate yet; check only
        # when the type is already known.
        if self.__type:
            self.__type(value)
        return value

    return super(EnumField, self).validate_default_element(value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def type(self): """Enum type used for field."""
# Lazily resolve the enum type from its name on first access and cache it.
if self.__type is None:
    found_type = find_definition(
        self.__type_name, self.message_definition())
    # Must be a proper subclass of Enum (Enum itself is not allowed).
    if not (found_type is not Enum and
            isinstance(found_type, type) and
            issubclass(found_type, Enum)):
        raise FieldDefinitionError(
            'Invalid enum type: %s' % found_type)
    self.__type = found_type
return self.__type
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default(self): """Default for enum field. Will cause resolution of Enum type and unresolved default value. """
# Cache via attribute presence: AttributeError on first access triggers
# resolution of the (possibly delayed) default value.
try:
    return self.__resolved_default
except AttributeError:
    resolved_default = super(EnumField, self).default
    if isinstance(resolved_default,
                  (six.string_types, six.integer_types)):
        # A string/int default names an enum member; resolve it now that
        # self.type is available.
        # pylint:disable=not-callable
        resolved_default = self.type(resolved_default)
    self.__resolved_default = resolved_default
    return self.__resolved_default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MessageToJson(message, include_fields=None): """Convert the given message to JSON."""
result = _ProtoJsonApiTools.Get().encode_message(message) return _IncludeFields(result, message, include_fields)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def DictToAdditionalPropertyMessage(properties, additional_property_type, sort_items=False): """Convert the given dictionary to an AdditionalProperty message."""
def DictToAdditionalPropertyMessage(properties, additional_property_type,
                                    sort_items=False):
    """Convert the given dictionary to an AdditionalProperty message.

    Args:
      properties: dict, the key/value pairs to convert.
      additional_property_type: a message type with a nested
          AdditionalProperty(key=..., value=...) type and an
          additionalProperties repeated field.
      sort_items: bool, if True the pairs are sorted by key first.

    Returns:
      An instance of additional_property_type.
    """
    items = properties.items()
    if sort_items:
        items = sorted(items)
    # Build the pair list in one pass (comprehension) instead of a
    # manual append loop.
    map_ = [
        additional_property_type.AdditionalProperty(key=key, value=value)
        for key, value in items
    ]
    return additional_property_type(additionalProperties=map_)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def MessageToRepr(msg, multiline=False, **kwargs): """Return a repr-style string for a protorpc message. protorpc.Message.__repr__ does not return anything that could be considered python code. Adding this function lets us print a protorpc message in such a way that it could be pasted into code later, and used to compare against other things. Args: msg: protorpc.Message, the message to be repr'd. multiline: bool, True if the returned string should have each field assignment on its own line. **kwargs: {str:str}, Additional flags for how to format the string. Known **kwargs: shortstrings: bool, True if all string values should be truncated at 100 characters, since when mocking the contents typically don't matter except for IDs, and IDs are usually less than 100 characters. no_modules: bool, True if the long module name should not be printed with each type. Returns: str, A string of valid python (assuming the right imports have been made) that recreates the message passed into this function. """
# TODO(jasmuth): craigcitro suggests a pretty-printer from apitools/gen. indent = kwargs.get('indent', 0) def IndentKwargs(kwargs): kwargs = dict(kwargs) kwargs['indent'] = kwargs.get('indent', 0) + 4 return kwargs if isinstance(msg, list): s = '[' for item in msg: if multiline: s += '\n' + ' ' * (indent + 4) s += MessageToRepr( item, multiline=multiline, **IndentKwargs(kwargs)) + ',' if multiline: s += '\n' + ' ' * indent s += ']' return s if isinstance(msg, messages.Message): s = type(msg).__name__ + '(' if not kwargs.get('no_modules'): s = msg.__module__ + '.' + s names = sorted([field.name for field in msg.all_fields()]) for name in names: field = msg.field_by_name(name) if multiline: s += '\n' + ' ' * (indent + 4) value = getattr(msg, field.name) s += field.name + '=' + MessageToRepr( value, multiline=multiline, **IndentKwargs(kwargs)) + ',' if multiline: s += '\n' + ' ' * indent s += ')' return s if isinstance(msg, six.string_types): if kwargs.get('shortstrings') and len(msg) > 100: msg = msg[:100] if isinstance(msg, datetime.datetime): class SpecialTZInfo(datetime.tzinfo): def __init__(self, offset): super(SpecialTZInfo, self).__init__() self.offset = offset def __repr__(self): s = 'TimeZoneOffset(' + repr(self.offset) + ')' if not kwargs.get('no_modules'): s = 'apitools.base.protorpclite.util.' + s return s msg = datetime.datetime( msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second, msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0))) return repr(msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _IncludeFields(encoded_message, message, include_fields): """Add the requested fields to the encoded message."""
if include_fields is None: return encoded_message result = json.loads(encoded_message) for field_name in include_fields: try: value = _GetField(message, field_name.split('.')) nullvalue = None if isinstance(value, list): nullvalue = [] except KeyError: raise exceptions.InvalidDataError( 'No field named %s in message of type %s' % ( field_name, type(message))) _SetField(result, field_name.split('.'), nullvalue) return json.dumps(result)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _DecodeUnknownFields(message, encoded_message): """Rewrite unknown fields in message into message.destination."""
# Only message types registered in _UNRECOGNIZED_FIELD_MAPPINGS get this
# treatment; everyone else is returned unchanged.
destination = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if destination is None:
    return message
pair_field = message.field_by_name(destination)
if not isinstance(pair_field, messages.MessageField):
    raise exceptions.InvalidDataFromServerError(
        'Unrecognized fields must be mapped to a compound '
        'message type.')
pair_type = pair_field.message_type
# TODO(craigcitro): Add more error checking around the pair
# type being exactly what we suspect (field names, etc).
if isinstance(pair_type.value, messages.MessageField):
    # Values are themselves messages: re-parse from the raw JSON.
    new_values = _DecodeUnknownMessages(
        message, json.loads(encoded_message), pair_type)
else:
    # Scalar values: pull them out of the unrecognized-field store.
    new_values = _DecodeUnrecognizedFields(message, pair_type)
setattr(message, destination, new_values)
# We could probably get away with not setting this, but
# why not clear it?
setattr(message, '_Message__unrecognized_fields', {})
return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _DecodeUnknownMessages(message, encoded_message, pair_type): """Process unknown fields in encoded_message of a message type."""
field_type = pair_type.value.type new_values = [] all_field_names = [x.name for x in message.all_fields()] for name, value_dict in six.iteritems(encoded_message): if name in all_field_names: continue value = PyValueToMessage(field_type, value_dict) if pair_type.value.repeated: value = _AsMessageList(value) new_pair = pair_type(key=name, value=value) new_values.append(new_pair) return new_values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _DecodeUnrecognizedFields(message, pair_type): """Process unrecognized fields in message."""
new_values = []
codec = _ProtoJsonApiTools.Get()
for unknown_field in message.all_unrecognized_fields():
    # TODO(craigcitro): Consider validating the variant if
    # the assignment below doesn't take care of it. It may
    # also be necessary to check it in the case that the
    # type has multiple encodings.
    value, _ = message.get_unrecognized_field_info(unknown_field)
    value_type = pair_type.field_by_name('value')
    if isinstance(value_type, messages.MessageField):
        # Compound value: decode the stored dict into a message.
        decoded_value = DictToMessage(value, pair_type.value.message_type)
    else:
        # Scalar value: decode with the apitools-aware codec.
        decoded_value = codec.decode_field(
            pair_type.value, value)
    try:
        new_pair_key = str(unknown_field)
    except UnicodeEncodeError:
        # Non-ASCII key on Python 2: fall back to the vanilla protojson
        # decoder for the key field.
        new_pair_key = protojson.ProtoJson().decode_field(
            pair_type.key, unknown_field)
    new_pair = pair_type(key=new_pair_key, value=decoded_value)
    new_values.append(new_pair)
return new_values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _EncodeUnknownFields(message): """Remap unknown fields in message out of message.source."""
# Only message types registered in _UNRECOGNIZED_FIELD_MAPPINGS get this
# treatment; everyone else is returned unchanged.
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
    return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
    raise exceptions.InvalidUserInputError(
        'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
# Move each key/value pair into the unrecognized-field store of the copy,
# then clear the source field so the pairs are not serialized twice.
for pair in pairs:
    encoded_value = codec.encode_field(value_field, pair.value)
    result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _SafeEncodeBytes(field, value): """Encode the bytes in value as urlsafe base64."""
try: if field.repeated: result = [base64.urlsafe_b64encode(byte) for byte in value] else: result = base64.urlsafe_b64encode(value) complete = True except TypeError: result = value complete = False return CodecResult(value=result, complete=complete)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _SafeDecodeBytes(unused_field, value): """Decode the urlsafe base64 value into bytes."""
try: result = base64.urlsafe_b64decode(str(value)) complete = True except TypeError: result = value complete = False return CodecResult(value=result, complete=complete)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ProcessUnknownEnums(message, encoded_message): """Add unknown enum values from encoded_message as unknown fields. ProtoRPC diverges from the usual protocol buffer behavior here and doesn't allow unknown fields. Throwing on unknown fields makes it impossible to let servers add new enum values and stay compatible with older clients, which isn't reasonable for us. We simply store unrecognized enum values as unknown fields, and all is well. Args: message: Proto message we've decoded thus far. encoded_message: JSON string we're decoding. Returns: message, with any unknown enums stored as unrecognized fields. """
if not encoded_message: return message decoded_message = json.loads(six.ensure_str(encoded_message)) for field in message.all_fields(): if (isinstance(field, messages.EnumField) and field.name in decoded_message and message.get_assigned_value(field.name) is None): message.set_unrecognized_field( field.name, decoded_message[field.name], messages.Variant.ENUM) return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _ProcessUnknownMessages(message, encoded_message): """Store any remaining unknown fields as strings. ProtoRPC currently ignores unknown values for which no type can be determined (and logs a "No variant found" message). For the purposes of reserializing, this is quite harmful (since it throws away information). Here we simply add those as unknown fields of type string (so that they can easily be reserialized). Args: message: Proto message we've decoded thus far. encoded_message: JSON string we're decoding. Returns: message, with any remaining unrecognized fields saved. """
if not encoded_message: return message decoded_message = json.loads(six.ensure_str(encoded_message)) message_fields = [x.name for x in message.all_fields()] + list( message.all_unrecognized_fields()) missing_fields = [x for x in decoded_message.keys() if x not in message_fields] for field_name in missing_fields: message.set_unrecognized_field(field_name, decoded_message[field_name], messages.Variant.STRING) return message
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddCustomJsonEnumMapping(enum_type, python_name, json_name, package=None): # pylint: disable=unused-argument """Add a custom wire encoding for a given enum value. This is primarily used in generated code, to handle enum values which happen to be Python keywords. Args: enum_type: (messages.Enum) An enum type python_name: (basestring) Python name for this value. json_name: (basestring) JSON name to be used on the wire. package: (NoneType, optional) No effect, exists for legacy compatibility. """
# Validate the enum type and the python-side value before registering.
if not issubclass(enum_type, messages.Enum):
    raise exceptions.TypecheckError(
        'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
if python_name not in enum_type.names():
    raise exceptions.InvalidDataError(
        'Enum value %s not a value for type %s' % (python_name, enum_type))
enum_map = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
# Refuse conflicting re-registrations in either direction.
_CheckForExistingMappings('enum', enum_type, python_name, json_name)
enum_map[python_name] = json_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddCustomJsonFieldMapping(message_type, python_name, json_name, package=None): # pylint: disable=unused-argument """Add a custom wire encoding for a given message field. This is primarily used in generated code, to handle enum values which happen to be Python keywords. Args: message_type: (messages.Message) A message type python_name: (basestring) Python name for this value. json_name: (basestring) JSON name to be used on the wire. package: (NoneType, optional) No effect, exists for legacy compatibility. """
# Validate the message type and the field name before registering.
if not issubclass(message_type, messages.Message):
    raise exceptions.TypecheckError(
        'Cannot set JSON field mapping for '
        'non-message "%s"' % message_type)
try:
    message_type.field_by_name(python_name)
except KeyError:
    raise exceptions.InvalidDataError(
        'Field %s not recognized for type %s' % (
            python_name, message_type))
field_map = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})
# Refuse conflicting re-registrations in either direction.
_CheckForExistingMappings('field', message_type, python_name, json_name)
field_map[python_name] = json_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetCustomJsonEnumMapping(enum_type, python_name=None, json_name=None): """Return the appropriate remapping for the given enum, or None."""
# Thin wrapper over _FetchRemapping bound to the enum mapping table.
return _FetchRemapping(enum_type, 'enum',
                       python_name=python_name, json_name=json_name,
                       mappings=_JSON_ENUM_MAPPINGS)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def GetCustomJsonFieldMapping(message_type, python_name=None, json_name=None): """Return the appropriate remapping for the given field, or None."""
# Thin wrapper over _FetchRemapping bound to the field mapping table.
return _FetchRemapping(message_type, 'field',
                       python_name=python_name, json_name=json_name,
                       mappings=_JSON_FIELD_MAPPINGS)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None, mappings=None): """Common code for fetching a key or value from a remapping dict."""
if python_name and json_name: raise exceptions.InvalidDataError( 'Cannot specify both python_name and json_name ' 'for %s remapping' % mapping_type) if not (python_name or json_name): raise exceptions.InvalidDataError( 'Must specify either python_name or json_name for %s remapping' % ( mapping_type,)) field_remappings = mappings.get(type_name, {}) if field_remappings: if python_name: return field_remappings.get(python_name) elif json_name: if json_name in list(field_remappings.values()): return [k for k in field_remappings if field_remappings[k] == json_name][0] return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _CheckForExistingMappings(mapping_type, message_type, python_name, json_name): """Validate that no mappings exist for the given values."""
# Pick the lookup function matching the mapping kind.
if mapping_type == 'field':
    getter = GetCustomJsonFieldMapping
elif mapping_type == 'enum':
    getter = GetCustomJsonEnumMapping
# Forward direction: python_name must not already map elsewhere.
existing = getter(message_type, python_name=python_name)
if existing is not None and existing != json_name:
    raise exceptions.InvalidDataError(
        'Cannot add mapping for %s "%s", already mapped to "%s"' % (
            mapping_type, python_name, existing))
# Reverse direction: json_name must not already map elsewhere.
existing = getter(message_type, json_name=json_name)
if existing is not None and existing != python_name:
    raise exceptions.InvalidDataError(
        'Cannot add mapping for %s "%s", already mapped to "%s"' % (
            mapping_type, json_name, existing))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _AsMessageList(msg): """Convert the provided list-as-JsonValue to a list."""
# This really needs to live in extra_types, but extra_types needs # to import this file to be able to register codecs. # TODO(craigcitro): Split out a codecs module and fix this ugly # import. from apitools.base.py import extra_types def _IsRepeatedJsonValue(msg): """Return True if msg is a repeated value as a JsonValue.""" if isinstance(msg, extra_types.JsonArray): return True if isinstance(msg, extra_types.JsonValue) and msg.array_value: return True return False if not _IsRepeatedJsonValue(msg): raise ValueError('invalid argument to _AsMessageList') if isinstance(msg, extra_types.JsonValue): msg = msg.array_value if isinstance(msg, extra_types.JsonArray): msg = msg.entries return msg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _IsMap(message, field): """Returns whether the "field" is actually a map-type."""
value = message.get_assigned_value(field.name) if not isinstance(value, messages.Message): return False try: additional_properties = value.field_by_name('additionalProperties') except KeyError: return False else: return additional_properties.repeated
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def UnrecognizedFieldIter(message, _edges=()): # pylint: disable=invalid-name """Yields the locations of unrecognized fields within "message". If a sub-message is found to have unrecognized fields, that sub-message will not be searched any further. We prune the search of the sub-message because we assume it is malformed and further checks will not yield productive errors. Args: message: The Message instance to search. _edges: Internal arg for passing state. Yields: (edges_to_message, field_names): edges_to_message: List[ProtoEdge], The edges (relative to "message") describing the path to the sub-message where the unrecognized fields were found. field_names: List[Str], The names of the field(s) that were unrecognized in the sub-message. """
if not isinstance(message, messages.Message):
    # This is a primitive leaf, no errors found down this path.
    return
field_names = message.all_unrecognized_fields()
if field_names:
    # This message is malformed. Stop recursing and report it.
    yield _edges, field_names
    return
# Recurse through all fields in the current message, tagging each hop
# with the edge type (repeated index / map key / scalar).
for field in message.all_fields():
    value = message.get_assigned_value(field.name)
    if field.repeated:
        for i, item in enumerate(value):
            repeated_edge = ProtoEdge(EdgeType.REPEATED, field.name, i)
            iter_ = UnrecognizedFieldIter(item, _edges + (repeated_edge,))
            for (e, y) in iter_:
                yield e, y
    elif _IsMap(message, field):
        for key, item in _MapItems(message, field):
            map_edge = ProtoEdge(EdgeType.MAP, field.name, key)
            iter_ = UnrecognizedFieldIter(item, _edges + (map_edge,))
            for (e, y) in iter_:
                yield e, y
    else:
        scalar_edge = ProtoEdge(EdgeType.SCALAR, field.name, None)
        iter_ = UnrecognizedFieldIter(value, _edges + (scalar_edge,))
        for (e, y) in iter_:
            yield e, y
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_field(self, field, value): """Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field. """
# Run any registered custom decoders first; a "complete" result
# short-circuits the default handling.
for decoder in _GetFieldCodecs(field, 'decoder'):
    result = decoder(field, value)
    value = result.value
    if result.complete:
        return value
if isinstance(field, messages.MessageField):
    # Sub-message: round-trip through JSON so this codec (not the
    # vanilla one) handles the nested decode.
    field_value = self.decode_message(
        field.message_type, json.dumps(value))
elif isinstance(field, messages.EnumField):
    # Translate a custom wire name back to the python enum name first.
    value = GetCustomJsonEnumMapping(
        field.type, json_name=value) or value
    try:
        field_value = super(
            _ProtoJsonApiTools, self).decode_field(field, value)
    except messages.DecodeError:
        # Unknown string enum values are tolerated (stored later as
        # unrecognized); anything else is a real error.
        if not isinstance(value, six.string_types):
            raise
        field_value = None
else:
    field_value = super(
        _ProtoJsonApiTools, self).decode_field(field, value)
return field_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode_field(self, field, value): """Encode the given value as JSON. Args: field: a messages.Field for the field we're encoding. value: a value for field. Returns: A python value suitable for json.dumps. """
# Run any registered custom encoders first; a "complete" result
# short-circuits the default handling.
for encoder in _GetFieldCodecs(field, 'encoder'):
    result = encoder(field, value)
    value = result.value
    if result.complete:
        return value
if isinstance(field, messages.EnumField):
    # Apply custom python-name -> wire-name remappings, if any.
    if field.repeated:
        remapped_value = [GetCustomJsonEnumMapping(
            field.type, python_name=e.name) or e.name for e in value]
    else:
        remapped_value = GetCustomJsonEnumMapping(
            field.type, python_name=value.name)
    # NOTE(review): a falsy remapped_value (e.g. empty repeated list)
    # falls through to the default encoding below.
    if remapped_value:
        return remapped_value
if (isinstance(field, messages.MessageField) and
        not isinstance(field, message_types.DateTimeField)):
    # Sub-message (but not DateTime): round-trip through JSON so this
    # codec handles the nested encode.
    value = json.loads(self.encode_message(value))
return super(_ProtoJsonApiTools, self).encode_field(field, value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_class_by_path(taskpath): """ Given a taskpath, returns the main task class. """
def load_class_by_path(taskpath):
    """ Given a taskpath, returns the main task class. """
    # "pkg.module.Class" -> module "pkg.module", attribute "Class".
    module_path, _, class_name = taskpath.rpartition(".")
    if not module_path:
        # A bare name: module and attribute share the same name.
        module_path = class_name
    return getattr(importlib.import_module(module_path), class_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def queue_raw_jobs(queue, params_list, **kwargs): """ Queue some jobs on a raw queue """
# Imported here rather than at module level — presumably to avoid an
# import cycle with the queue module; confirm before moving.
from .queue import Queue
Queue(queue).enqueue_raw_jobs(params_list, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def queue_jobs(main_task_path, params_list, queue=None, batch_size=1000): """ Queue multiple jobs on a regular queue """
# Nothing to enqueue.
if len(params_list) == 0:
    return []
# Default queue comes from the task's configuration, falling back to
# "default".
if queue is None:
    task_def = context.get_current_config().get("tasks", {}).get(main_task_path) or {}
    queue = task_def.get("queue", "default")
from .queue import Queue
queue_obj = Queue(queue)
if queue_obj.is_raw:
    raise Exception("Can't queue regular jobs on a raw queue")
all_ids = []
# Insert in batches of batch_size to bound the size of each MongoDB write.
for params_group in group_iter(params_list, n=batch_size):
    context.metric("jobs.status.queued", len(params_group))
    # Insert the job in MongoDB
    job_ids = Job.insert([{
        "path": main_task_path,
        "params": params,
        "queue": queue,
        "datequeued": datetime.datetime.utcnow(),
        "status": "queued"
    } for params in params_group], w=1, return_jobs=False)
    all_ids += job_ids
# Wake up workers and record the new queue size.
queue_obj.notify(len(all_ids))
set_queues_size({queue: len(all_ids)})
return all_ids
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch(self, start=False, full_data=True): """ Get the current job data and possibly flag it as started. """
if self.id is None:
    return self
# Choose the projection: everything, a caller-supplied dict, or the
# minimal set of fields needed to run the job.
if full_data is True:
    fields = None
elif isinstance(full_data, dict):
    fields = full_data
else:
    fields = {
        "_id": 0,
        "path": 1,
        "params": 1,
        "status": 1,
        "retry_count": 1,
    }
if start:
    self.datestarted = datetime.datetime.utcnow()
    # Atomically claim the job: only transition to "started" if it has
    # not been cancelled/aborted/maxretried in the meantime.
    self.set_data(self.collection.find_and_modify(
        {
            "_id": self.id,
            "status": {"$nin": ["cancel", "abort", "maxretries"]}
        },
        {"$set": {
            "status": "started",
            "datestarted": self.datestarted,
            "worker": self.worker.id
        },
            "$unset": {
            "dateexpires": 1  # we don't want started jobs to expire unexpectedly
        }},
        projection=fields)
    )
    context.metric("jobs.status.started")
else:
    self.set_data(self.collection.find_one({
        "_id": self.id
    }, projection=fields))
if self.data is None:
    context.log.info(
        "Job %s not found in MongoDB or status was cancelled!" %
        self.id)
# Data now mirrors what is stored in MongoDB.
self.stored = True
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Persists the current job metadata to MongoDB. Will be called at each worker report. """
if not self.saved and self.data and "progress" in self.data: # TODO should we save more fields? self.collection.update({"_id": self.id}, {"$set": { "progress": self.data["progress"] }}) self.saved = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert(cls, jobs_data, queue=None, statuses_no_storage=None, return_jobs=True, w=None, j=None): """ Insert a job into MongoDB """
now = datetime.datetime.utcnow()
# Jobs inserted directly as "started" get a start timestamp.
for data in jobs_data:
    if data["status"] == "started":
        data["datestarted"] = now
# "No storage" mode skips the MongoDB write entirely for started jobs.
no_storage = (statuses_no_storage is not None) and (
    "started" in statuses_no_storage)
if no_storage and return_jobs:
    for data in jobs_data:
        data["_id"] = ObjectId()  # Give the job a temporary ID
else:
    inserted = context.connections.mongodb_jobs.mrq_jobs.insert(
        jobs_data, manipulate=True, w=w, j=j
    )
if return_jobs:
    # Wrap each inserted document in a Job instance.
    jobs = []
    for data in jobs_data:
        job = cls(data["_id"], queue=queue)
        job.set_data(data)
        job.statuses_no_storage = statuses_no_storage
        job.stored = (not no_storage)
        if data["status"] == "started":
            job.datestarted = data["datestarted"]
        jobs.append(job)
    return jobs
else:
    return inserted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _attach_original_exception(self, exc): """ Often, a retry will be raised inside an "except" block. This keeps track of the first exception for debugging purposes """
original_exception = sys.exc_info() if original_exception[0] is not None: exc.original_exception = original_exception
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retry(self, queue=None, delay=None, max_retries=None): """ Marks the current job as needing to be retried. Interrupts it. """
# Resolve the retry budget: explicit argument wins, else the task's
# default. (Removes the redundant `max_retries = max_retries` no-op.)
if max_retries is None:
    max_retries = self.max_retries
if self.data.get("retry_count", 0) >= max_retries:
    raise MaxRetriesInterrupt()
exc = RetryInterrupt()
# Queue precedence: explicit arg > current queue > stored queue > default.
exc.queue = queue or self.queue or self.data.get("queue") or "default"
exc.retry_count = self.data.get("retry_count", 0) + 1
# Delay precedence: explicit arg > task's configured retry_delay.
exc.delay = delay if delay is not None else self.retry_delay
self._attach_original_exception(exc)
raise exc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def requeue(self, queue=None, retry_count=0): """ Requeues the current job. Doesn't interrupt it """
if not queue:
    # Make sure we know the job's stored queue before requeueing on it.
    if not self.data or not self.data.get("queue"):
        self.fetch(full_data={"_id": 0, "queue": 1, "path": 1})
    queue = self.data["queue"]
updates = {
    "queue": queue,
    "datequeued": datetime.datetime.utcnow(),
    "retry_count": retry_count
}
self._save_status("queued", updates=updates)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def perform(self): """ Loads and starts the main task for this job, then saves the result. """
if self.data is None:
    return
context.log.debug("Starting %s(%s)" % (self.data["path"],
                                        self.data["params"]))
task_class = load_class_by_path(self.data["path"])
self.task = task_class()
self.task.is_main_task = True
if not self.task.max_concurrency:
    result = self.task.run_wrapped(self.data["params"])
else:
    # Only max_concurrency == 1 is supported for now (a Redis lock,
    # not a semaphore).
    if self.task.max_concurrency > 1:
        raise NotImplementedError()
    lock = None
    try:
        # TODO: implement a semaphore
        # Lock TTL slightly exceeds the job timeout so it can't outlive
        # a killed job forever.
        lock = context.connections.redis.lock(self.redis_max_concurrency_key,
                                              timeout=self.timeout + 5)
        if not lock.acquire(blocking=True, blocking_timeout=0):
            raise MaxConcurrencyInterrupt()
        result = self.task.run_wrapped(self.data["params"])
    finally:
        try:
            if lock:
                lock.release()
        except LockError:
            pass
self.save_success(result)
if context.get_current_config().get("trace_greenlets"):
    # TODO: this is not the exact greenlet_time measurement because it doesn't
    # take into account the last switch's time. This is why we force a last switch.
    # This does cause a performance overhead. Instead, we should print the
    # last timing directly from the trace() function in context?
    # pylint: disable=protected-access
    gevent.sleep(0)
    current_greenlet = gevent.getcurrent()
    t = (datetime.datetime.utcnow() - self.datestarted).total_seconds()
    context.log.debug(
        "Job %s success: %0.6fs total, %0.6fs in greenlet, %s switches" %
        (self.id, t, current_greenlet._trace_time,
         current_greenlet._trace_switches - 1)
    )
else:
    context.log.debug("Job %s success: %0.6fs total" % (
        self.id,
        (datetime.datetime.utcnow() - self.datestarted).total_seconds()
    ))
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait(self, poll_interval=1, timeout=None, full_data=False): """ Wait for this job to finish. """
end_time = None if timeout: end_time = time.time() + timeout while end_time is None or time.time() < end_time: job_data = self.collection.find_one({ "_id": ObjectId(self.id), "status": {"$nin": ["started", "queued"]} }, projection=({ "_id": 0, "result": 1, "status": 1 } if not full_data else None)) if job_data: return job_data time.sleep(poll_interval) raise Exception("Waited for job result for %s seconds, timeout." % timeout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def kill(self, block=False, reason="unknown"): """ Forcefully kill all greenlets associated with this job """
# Never kill the greenlet we are currently running in.
current_greenletid = id(gevent.getcurrent())
trace = "Job killed: %s" % reason
for greenlet, job in context._GLOBAL_CONTEXT["greenlets"].values():
    greenletid = id(greenlet)
    if job and job.id == self.id and greenletid != current_greenletid:
        greenlet.kill(block=block)
        # Capture each killed greenlet's stack for the saved traceback.
        trace += "\n\n--- Greenlet %s ---\n" % greenletid
        trace += "".join(traceback.format_stack(greenlet.gr_frame))
        context._GLOBAL_CONTEXT["greenlets"].pop(greenletid, None)
# On a timeout kill, persist the timeout status (once) with the
# collected stacks.
if reason == "timeout" and self.data["status"] != "timeout":
    updates = {
        "exceptiontype": "TimeoutInterrupt",
        "traceback": trace
    }
    self._save_status("timeout", updates=updates, exception=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _save_traceback_history(self, status, trace, job_exc): """ Create traceback history or add a new traceback to history. """
# Build one history entry describing this failure.
entry = {
    "date": datetime.datetime.utcnow(),
    "status": status,
    "exceptiontype": job_exc.__name__
}

# The trace may embed the original exception after a marker line; when it
# does, store that part separately.
parts = trace.split("---- Original exception: -----")
if len(parts) > 1:
    entry["original_traceback"] = parts[1]

current_worker = context.get_current_worker()
if current_worker:
    entry["worker"] = current_worker.id

entry["traceback"] = parts[0]

# Append the entry to the job document's traceback_history array.
self.collection.update(
    {"_id": self.id},
    {"$push": {"traceback_history": entry}}
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trace_memory_clean_caches(self): """ Avoid polluting results with some builtin python caches """
# Clear well-known stdlib module-level caches so they don't show up as
# "growth" in the memory reports.
urllib.parse.clear_cache()
re.purge()
linecache.clearcache()
copyreg.clear_extension_cache()
# fnmatch's cache-clearing helper was renamed across Python versions.
if hasattr(fnmatch, "purge"):
    fnmatch.purge()  # pylint: disable=no-member
elif hasattr(fnmatch, "_purge"):
    fnmatch._purge()  # pylint: disable=no-member
if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
    encodings._cache = {}
# Flush log handlers so buffered records don't hold references either.
for handler in context.log.handlers:
    handler.flush()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trace_memory_start(self): """ Starts measuring memory consumption """
self.trace_memory_clean_caches()
# Print current object growth to help spot leak candidates.
objgraph.show_growth(limit=30)

gc.collect()
# Baseline total memory; compared against in trace_memory_stop().
self._memory_start = self.worker.get_memory()["total"]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def trace_memory_stop(self): """ Stops measuring memory consumption """
self.trace_memory_clean_caches()
# Print object growth since trace_memory_start() to spot leak candidates.
objgraph.show_growth(limit=30)

trace_type = context.get_current_config()["trace_memory_type"]
if trace_type:
    # Render a backref-chain graph for one random instance of the traced
    # type, to show what is keeping it alive.
    filename = '%s/%s-%s.png' % (
        context.get_current_config()["trace_memory_output_dir"],
        trace_type,
        self.id)

    chain = objgraph.find_backref_chain(
        random.choice(
            objgraph.by_type(trace_type)
        ),
        objgraph.is_proper_module
    )
    objgraph.show_chain(chain, filename=filename)
    # Drop local references before measuring, so they don't skew the diff.
    del filename
    del chain

gc.collect()
self._memory_stop = self.worker.get_memory()["total"]

diff = self._memory_stop - self._memory_start

context.log.debug("Memory diff for job %s : %s" % (self.id, diff))

# We need to update it later than the results, we need them off memory
# already.
self.collection.update(
    {"_id": self.id},
    {"$set": {
        "memory_diff": diff
    }},
    w=1
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_job_ids(self, skip=0, limit=20): """ Returns a list of job ids on a queue """
return [str(x["_id"]) for x in self.collection.find( {"status": "queued"}, sort=[("_id", -1 if self.is_reverse else 1)], projection={"_id": 1}) ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dequeue_jobs(self, max_jobs=1, job_class=None, worker=None): """ Fetch a maximum of max_jobs from this queue """
# Import lazily to avoid a circular import at module load time.
if job_class is None:
    from .job import Job
    job_class = Job

count = 0

# TODO: remove _id sort after full migration to datequeued
sort_order = [("datequeued", -1 if self.is_reverse else 1), ("_id", -1 if self.is_reverse else 1)]

# MongoDB optimization: with many jobs it's faster to fetch the IDs first and do the atomic update second
# Some jobs may have been stolen by another worker in the meantime but it's a balance (should we over-fetch?)
# job_ids = None

# if max_jobs > 5:
#     job_ids = [x["_id"] for x in self.collection.find(
#         self.base_dequeue_query,
#         limit=max_jobs,
#         sort=sort_order,
#         projection={"_id": 1}
#     )]
#     if len(job_ids) == 0:
#         return

for i in range(max_jobs):  # if job_ids is None else len(job_ids)):

    # if job_ids is not None:
    #     query = {
    #         "status": "queued",
    #         "_id": job_ids[i]
    #     }
    #     sort_order = None
    # else:
    query = self.base_dequeue_query

    # Atomic claim: flips the job to "started" and assigns the worker in a
    # single findAndModify, so two workers can never dequeue the same job.
    job_data = self.collection.find_one_and_update(
        query,
        {"$set": {
            "status": "started",
            "datestarted": datetime.datetime.utcnow(),
            "worker": worker.id if worker else None
        }, "$unset": {
            "dateexpires": 1  # we don't want started jobs to expire unexpectedly
        }},
        sort=sort_order,
        return_document=ReturnDocument.AFTER,
        projection={
            "_id": 1,
            "path": 1,
            "params": 1,
            "status": 1,
            "retry_count": 1,
            "queue": 1,
            "datequeued": 1
        }
    )

    # No more queued jobs matching the query: stop early.
    if not job_data:
        break

    if worker:
        worker.status = "spawn"

    count += 1
    context.metric("queues.%s.dequeued" % job_data["queue"], 1)

    # Wrap the raw document into a Job object without re-fetching it.
    job = job_class(job_data["_id"], queue=self.id, start=False)
    job.set_data(job_data)
    job.datestarted = datetime.datetime.utcnow()

    context.metric("jobs.status.started")

    yield job

context.metric("queues.all.dequeued", count)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def jsonify(*args, **kwargs): """ jsonify with support for MongoDB ObjectId """
return Response( json.dumps( dict( *args, **kwargs), cls=MongoJSONEncoder), mimetype='application/json')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def queuestats(self): """ Compute ETAs for every known queue & subqueue """
start_time = time.time()

log.debug("Starting queue stats...")

# Fetch all known queues
queues = [Queue(q) for q in Queue.all_known()]

# Drop ETA trackers for queues that no longer exist.
new_queues = {queue.id for queue in queues}
old_queues = set(self.queue_etas.keys())

for deleted_queue in old_queues.difference(new_queues):
    self.queue_etas.pop(deleted_queue)

t = time.time()
stats = {}

for queue in queues:
    cnt = queue.count_jobs_to_dequeue()
    eta = self.queue_etas[queue.id].next(cnt, t=t)

    # Number of jobs to dequeue, ETA, Time of stats
    stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t))

with connections.redis.pipeline(transaction=True) as pipe:
    # Occasionally (~1% of runs) reset the whole hash so entries for
    # deleted queues are eventually purged.
    if random.randint(0, 100) == 0 or len(stats) == 0:
        pipe.delete(self.redis_queuestats_key)
    if len(stats) > 0:
        pipe.hmset(self.redis_queuestats_key, stats)
    pipe.execute()

log.debug("... done queue stats in %0.4fs" % (time.time() - start_time))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subpool_map(pool_size, func, iterable): """ Starts a Gevent pool and run a map. Takes care of setting current_job and cleaning up. """
from .context import get_current_job, set_current_job, log

# No subpool requested: run sequentially in the current greenlet.
if not pool_size:
    return [func(*args) for args in iterable]

# Counts how many calls actually ran, for the debug log below.
counter = itertools_count()

current_job = get_current_job()

def inner_func(*args):
    """ As each call to 'func' will be done in a random greenlet of the subpool,
        we need to register their IDs with set_current_job() to make get_current_job()
        calls work properly inside 'func'. """
    next(counter)
    if current_job:
        set_current_job(current_job)

    try:
        ret = func(*args)
    except Exception as exc:
        # Attach the formatted traceback so the caller can see where the
        # greenlet failed, not just that it failed.
        trace = traceback.format_exc()
        exc.subpool_traceback = trace
        raise

    if current_job:
        set_current_job(None)
    return ret

def inner_iterable():
    """ This will be called inside the pool's main greenlet, which ID also needs to be registered """
    if current_job:
        set_current_job(current_job)

    for x in iterable:
        yield x

    if current_job:
        set_current_job(None)

start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
ret = pool.map(inner_func, inner_iterable())
# Re-raise any greenlet exception in the caller's greenlet.
pool.join(raise_error=True)
total_time = time.time() - start_time

log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))

return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subpool_imap(pool_size, func, iterable, flatten=False, unordered=False, buffer_size=None): """ Generator version of subpool_map. Should be used with unordered=True for optimal performance """
from .context import get_current_job, set_current_job, log if not pool_size: for args in iterable: yield func(*args) counter = itertools_count() current_job = get_current_job() def inner_func(*args): """ As each call to 'func' will be done in a random greenlet of the subpool, we need to register their IDs with set_current_job() to make get_current_job() calls work properly inside 'func'. """ next(counter) if current_job: set_current_job(current_job) try: ret = func(*args) except Exception as exc: trace = traceback.format_exc() exc.subpool_traceback = trace raise if current_job: set_current_job(None) return ret def inner_iterable(): """ This will be called inside the pool's main greenlet, which ID also needs to be registered """ if current_job: set_current_job(current_job) for x in iterable: yield x if current_job: set_current_job(None) start_time = time.time() pool = gevent.pool.Pool(size=pool_size) if unordered: iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size) else: iterator = pool.imap(inner_func, inner_iterable()) for x in iterator: if flatten: for y in x: yield y else: yield x pool.join(raise_error=True) total_time = time.time() - start_time log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _hash_task(task): """ Returns a unique hash for identify a task and its params """
params = task.get("params") if params: params = json.dumps(sorted(list(task["params"].items()), key=lambda x: x[0])) # pylint: disable=no-member full = [str(task.get(x)) for x in ["path", "interval", "dailytime", "weekday", "monthday", "queue"]] full.extend([str(params)]) return " ".join(full)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_config_integrity(self): """ Make sure the scheduler config is valid """
# Index tasks by their hash: any collision means two identical schedules.
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}

if len(tasks_by_hash) != len(self.config_tasks):
    raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.")

for h, task in tasks_by_hash.items():
    dailytime = task.get("dailytime")

    # monthday/weekday only make sense combined with a dailytime.
    if task.get("monthday") and not dailytime:
        raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h)
    if task.get("weekday") and not dailytime:
        raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h)

    # At least one scheduling dimension is mandatory.
    if not any(task.get(k) for k in ("monthday", "weekday", "dailytime", "interval")):
        raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync_config_tasks(self): """ Performs the first sync of a list of tasks, often defined in the config file. """
# Index the configured tasks by hash to diff them against the DB state.
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}

# Delete DB tasks that are no longer in the config; keep matching ones.
for task in self.all_tasks:
    if tasks_by_hash.get(task["hash"]):
        del tasks_by_hash[task["hash"]]
    else:
        self.collection.remove({"_id": task["_id"]})
        log.debug("Scheduler: deleted %s" % task["hash"])

# What remains are the new ones to be inserted
for h, task in tasks_by_hash.items():
    task["hash"] = h
    # Epoch default means "never queued yet".
    task["datelastqueued"] = datetime.datetime.fromtimestamp(0)
    if task.get("dailytime"):
        # Because MongoDB can store datetimes but not times,
        # we add today's date to the dailytime.
        # The date part will be discarded in check()
        task["dailytime"] = datetime.datetime.combine(
            datetime.datetime.utcnow(), task["dailytime"])
        task["interval"] = 3600 * 24

        # Avoid to queue task in check() if today dailytime is already passed
        if datetime.datetime.utcnow().time() > task["dailytime"].time():
            task["datelastqueued"] = datetime.datetime.utcnow()

    self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
    log.debug("Scheduler: added %s" % task["hash"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ratelimit(key, limit, per=1, redis=None): """ Returns an integer with the number of available actions for the current period in seconds. If zero, rate was already reached. """
if redis is None: redis = connections.redis # http://redis.io/commands/INCR now = int(time.time()) k = "ratelimit:%s:%s" % (key, now // per) with redis.pipeline(transaction=True) as pipeline: pipeline.incr(k, 1) pipeline.expire(k, per + 10) value = pipeline.execute() current = int(value[0]) - 1 if current >= limit: return 0 else: return limit - current
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def greenlet_logs(self): """ This greenlet always runs in background to update current logs in MongoDB every 10 seconds. Caution: it might get delayed when doing long blocking operations. Should we do this in a thread instead? """
while True:
    try:
        self.flush_logs()
    except Exception as e:  # pylint: disable=broad-except
        self.log.error("When flushing logs: %s" % e)
    finally:
        # Always sleep, even after a failure, to keep the interval steady.
        time.sleep(self.config["report_interval"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh_queues(self, fatal=False): """ Updates the list of currently known queues and subqueues """
try:
    configured = self.config["queues"]

    # Trailing-slash entries are wildcard prefixes matching subqueues.
    wildcard_prefixes = [name for name in configured if name.endswith("/")]
    known_subqueues = Queue.all_known(prefixes=wildcard_prefixes)

    refreshed = []
    for name in configured:
        refreshed.append(Queue(name))
        if not name.endswith("/"):
            continue
        # Expand the wildcard into its currently-known subqueues.
        refreshed.extend(
            Queue(sub) for sub in known_subqueues if sub.startswith(name)
        )

    self.queues = refreshed

except Exception as exc:  # pylint: disable=broad-except
    self.log.error("When refreshing subqueues: %s", exc)
    if fatal:
        raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_worker_report(self, with_memory=False): """ Returns a dict containing all the data we can about the current status of the worker and its jobs. """
greenlets = [] for greenlet in list(self.gevent_pool): g = {} short_stack = [] stack = traceback.format_stack(greenlet.gr_frame) for s in stack[1:]: if "/gevent/hub.py" in s: break short_stack.append(s) g["stack"] = short_stack job = get_current_job(id(greenlet)) if job: job.save() if job.data: g["path"] = job.data["path"] g["datestarted"] = job.datestarted g["id"] = str(job.id) g["time"] = getattr(greenlet, "_trace_time", 0) g["switches"] = getattr(greenlet, "_trace_switches", None) # pylint: disable=protected-access if job._current_io is not None: g["io"] = job._current_io greenlets.append(g) # When faking network latency, all sockets are affected, including OS ones, but # we still want reliable reports so this is disabled. if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]): cpu = { "user": 0, "system": 0, "percent": 0 } mem = {"rss": 0, "swap": 0, "total": 0} else: cpu_times = self.process.cpu_times() cpu = { "user": cpu_times.user, "system": cpu_times.system, "percent": self.process.cpu_percent(0) } mem = self.get_memory() # Avoid sharing passwords or sensitive config! 
whitelisted_config = [ "max_jobs", "max_memory" "greenlets", "processes", "queues", "dequeue_strategy", "scheduler", "name", "local_ip", "external_ip", "agent_id", "worker_group" ] io = None if self._traced_io: io = {} for k, v in iteritems(self._traced_io): if k == "total": io[k] = v else: io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1]) used_pool_slots = len(self.gevent_pool) used_avg = self.pool_usage_average.next(used_pool_slots) return { "status": self.status, "config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config}, "done_jobs": self.done_jobs, "usage_avg": used_avg / self.pool_size, "datestarted": self.datestarted, "datereported": datetime.datetime.utcnow(), "name": self.name, "io": io, "_id": str(self.id), "process": { "pid": self.process.pid, "cpu": cpu, "mem": mem # https://code.google.com/p/psutil/wiki/Documentation # get_open_files # get_connections # get_num_ctx_switches # get_num_fds # get_io_counters # get_nice }, "jobs": greenlets }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def greenlet_timeouts(self): """ This greenlet kills jobs in other greenlets if they timeout. """
while True:
    now = datetime.datetime.utcnow()

    for greenlet in list(self.gevent_pool):
        job = get_current_job(id(greenlet))
        # Only jobs that have a timeout and actually started can expire.
        if not job or not job.timeout or not job.datestarted:
            continue
        deadline = job.datestarted + datetime.timedelta(seconds=job.timeout)
        if now > deadline:
            # Kill without blocking so one stuck job can't stall this loop.
            job.kill(block=False, reason="timeout")

    time.sleep(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def wait_for_idle(self): """ Waits until the worker has nothing more to do. Very useful in tests """
# Be mindful that this is being executed in a different greenlet than the work_* methods.

while True:

    time.sleep(0.01)

    with self.work_lock:

        if self.status != "wait":
            continue

        if len(self.gevent_pool) > 0:
            continue

        # Force a refresh of the current subqueues, one might just have been created.
        self.refresh_queues()

        # We might be dequeueing a new subqueue. Double check that we don't have anything more to do
        outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)

        # BUG FIX: was `outcome is "wait"` — identity comparison against a
        # string literal is implementation-dependent and a SyntaxWarning on
        # Python 3.8+; equality is what is meant.
        if outcome == "wait" and dequeue_jobs == 0:
            break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def work_once(self, free_pool_slots=1, max_jobs=None): """ Does one lookup for new jobs, inside the inner work loop """
dequeued_jobs = 0

# Skip queues that are paused, either directly or via their root queue.
available_queues = [
    queue for queue in self.queues
    if queue.root_id not in self.paused_queues and
    queue.id not in self.paused_queues
]

# NOTE(review): if available_queues is empty and dequeue_strategy is
# "parallel", queue_i is undefined below — confirm this combination
# cannot occur in practice.
for queue_i in range(len(available_queues)):

    # Rotate the starting queue between passes (see queue_offset below).
    queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]

    max_jobs_per_queue = free_pool_slots - dequeued_jobs

    if max_jobs_per_queue <= 0:
        # Step back so the offset update below points at this queue again.
        queue_i -= 1
        break

    if self.config["dequeue_strategy"] == "parallel":
        # Spread the remaining pool slots evenly over the queues left to visit.
        max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))

    for job in queue.dequeue_jobs(
        max_jobs=max_jobs_per_queue,
        job_class=self.job_class,
        worker=self
    ):
        dequeued_jobs += 1

        self.gevent_pool.spawn(self.perform_job, job)

# At the next pass, start at the next queue to avoid always dequeuing the same one
if self.config["dequeue_strategy"] == "parallel":
    self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)

# TODO consider this when dequeuing jobs to have strict limits
if max_jobs and self.done_jobs >= max_jobs:
    self.log.info("Reached max_jobs=%s" % self.done_jobs)
    return "break", dequeued_jobs

# We seem to have exhausted available jobs, we can sleep for a
# while.
if dequeued_jobs == 0:

    if self.config["dequeue_strategy"] == "burst":
        self.log.info("Burst mode: stopping now because queues were empty")
        return "break", dequeued_jobs

    return "wait", dequeued_jobs

return None, dequeued_jobs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def work_wait(self): """ Wait for new jobs to arrive """
# Block on the notify lists until a job is pushed or max_latency elapses.
# NOTE(review): the queue keys are unpacked as positional args with the
# timeout last — this matches older redis-py blpop signatures; confirm
# against the pinned redis-py version (modern versions expect
# blpop(keys, timeout)).
if len(self.queues_with_notify) > 0:
    # https://github.com/antirez/redis/issues/874
    connections.redis.blpop(*(self.queues_with_notify + [max(1, int(self.config["max_latency"]))]))
else:
    # No notify-enabled queues: plain sleep between polls.
    gevent.sleep(self.config["max_latency"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize_job_ids(self, job_ids): """ Returns job_ids serialized for storage in Redis """
if len(job_ids) == 0 or self.use_large_ids: return job_ids elif isinstance(job_ids[0], ObjectId): return [x.binary for x in job_ids] else: return [bytes.fromhex(str(x)) for x in job_ids]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unserialize_job_ids(self, job_ids): """ Unserialize job_ids stored in Redis """
if len(job_ids) == 0 or self.use_large_ids: return job_ids else: return [binascii.hexlify(x.encode('utf-8') if (PY3 and isinstance(x, str)) else x).decode('ascii') for x in job_ids]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_active(cls): """ List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command """
prefix = context.get_current_config()["redis_prefix"] queues = [] for key in context.connections.redis.keys(): if key.startswith(prefix): queues.append(Queue(key[len(prefix) + 3:])) return queues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all_known(cls, sources=None, prefixes=None): """ List all currently known queues """
sources = sources or ("config", "jobs", "raw_subqueues")

queues = set()

# Config source is skipped when prefix-filtering: config names are roots,
# not subqueues.
if "config" in sources and not prefixes:
    # Some queues are explicitly declared in the config (including all root raw queues)
    cfg = context.get_current_config()
    queues_from_config = [
        t.get("queue")
        for t in (cfg.get("tasks") or {}).values()
        if t.get("queue")
    ]
    queues_from_config += Queue.get_queues_config().keys()
    queues_from_config += [
        t.get("retry_queue")
        for t in Queue.get_queues_config().values()
        if t.get("retry_queue")
    ]
    queues |= set(queues_from_config)

if "jobs" in sources:
    # This will get all queues from mongodb, including those where we have only non-queued jobs
    for q in context.connections.mongodb_jobs.mrq_jobs.distinct("queue"):
        if prefixes and not any(q.startswith(p) for p in prefixes):
            continue
        queues.add(q)

if "raw_subqueues" in sources:
    for q in Queue.get_queues_config():
        # Prefix filter matches the root queue name plus a trailing slash.
        if prefixes and not any(q + "/" == p for p in prefixes):
            continue

        queue_obj = Queue(q)
        if queue_obj.is_raw and queue_obj.has_subqueues:
            # TODO: optimize this with a single SUNION on all keys
            queues |= queue_obj.get_known_subqueues()

return queues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def all(cls): """ List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow. """
# Start with raw queues we know exist from the config queues = {x: 0 for x in Queue.get_queues_config()} stats = list(context.connections.mongodb_jobs.mrq_jobs.aggregate([ {"$match": {"status": "queued"}}, {"$group": {"_id": "$queue", "jobs": {"$sum": 1}}} ])) queues.update({x["_id"]: x["jobs"] for x in stats}) return queues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def notify(self, new_jobs_count): """ We just queued new_jobs_count jobs on this queue, wake up the workers if needed """
if not self.use_notify(): return # Not really useful to send more than 100 notifs (to be configured) count = min(new_jobs_count, 100) notify_key = redis_key("notify", self) context.connections.redis.lpush(notify_key, *([1] * count)) context.connections.redis.expire(notify_key, max(1, int(context.get_current_config()["max_latency"] * 2)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def empty(self): """ Empty a queue. """
# Atomically drop the queue's data and its known-subqueues set.
with context.connections.redis.pipeline(transaction=True) as pipe:
    for key in (self.redis_key, self.redis_key_known_subqueues):
        pipe.delete(key)
    pipe.execute()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def enqueue_raw_jobs(self, params_list): """ Add Jobs to this queue with raw parameters. They are not yet in MongoDB. """
if len(params_list) == 0: return if self.is_subqueue: context.connections.redis.sadd(self.redis_key_known_subqueues, self.id) # ZSET if self.is_sorted: if not isinstance(params_list, dict) and self.is_timed: now = time.time() params_list = {x: now for x in params_list} context.connections.redis.zadd(self.redis_key, **params_list) # SET elif self.is_set: context.connections.redis.sadd(self.redis_key, *params_list) # LIST else: context.connections.redis.rpush(self.redis_key, *params_list) context.metric("queues.%s.enqueued" % self.id, len(params_list)) context.metric("queues.all.enqueued", len(params_list))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_raw_jobs(self, params_list): """ Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0: return # ZSET if self.is_sorted: context.connections.redis.zrem(self.redis_key, *iter(params_list)) # SET elif self.is_set: context.connections.redis.srem(self.redis_key, *params_list) else: # O(n)! Use with caution. for k in params_list: context.connections.redis.lrem(self.redis_key, 1, k) context.metric("queues.%s.removed" % self.id, len(params_list)) context.metric("queues.all.removed", len(params_list))