code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def sanitize(value):
    """Clean HTML in *value* with bleach when it is a string; pass other
    values through unchanged.

    Allowed tags/attributes/styles come from the module-level ALLOWED_*
    constants.  NOTE(review): uses `basestring`, so this code appears to
    target Python 2 (or defines a `basestring` alias) — confirm.
    """
    if isinstance(value, basestring):
        value = bleach.clean(value, tags=ALLOWED_TAGS,
                             attributes=ALLOWED_ATTRIBUTES,
                             styles=ALLOWED_STYLES, strip=False)
    return value
Sanitizes strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|escape_html }}
def processor_for(content_model_or_slug, exact_page=False):
    """Register the decorated function as a page processor for the given
    content model or slug.

    Accepts a Page subclass, an "app.model" string, or a slug string.
    With ``exact_page=True`` the processor only runs when the loaded page
    is for the exact URL being viewed, not merely a URL prefix.

    :raises TypeError: when the argument is neither a Page subclass,
        an "app.model" string, nor a slug string.
    """
    content_model = None
    slug = ""
    if isinstance(content_model_or_slug, (str, _str)):
        try:
            parts = content_model_or_slug.split(".", 1)
            content_model = apps.get_model(*parts)
        except (TypeError, ValueError, LookupError):
            # Not resolvable as "app.model" -> treat it as a slug.
            slug = content_model_or_slug
    elif issubclass(content_model_or_slug, Page):
        content_model = content_model_or_slug
    else:
        raise TypeError("%s is not a valid argument for page_processor, "
                        "which should be a model subclass of Page in class "
                        "or string form (app.model), or a valid slug"
                        % content_model_or_slug)

    def decorator(func):
        # Processors are stored most-recently-registered first.
        parts = (func, exact_page)
        if content_model:
            model_name = content_model._meta.object_name.lower()
            processors[model_name].insert(0, parts)
        else:
            processors["slug:%s" % slug].insert(0, parts)
        return func
    return decorator
Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg, will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed.
def get_repository_notification_session(self, repository_receiver, proxy):
    """Get the notification session for subscribing to repository changes.

    arg:    repository_receiver (osid.repository.RepositoryReceiver):
            the notification callback
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.RepositoryNotificationSession)
    raise:  NullArgument - repository_receiver is null
    raise:  Unimplemented - supports_repository_notification() is false
    """
    if repository_receiver is None:
        raise NullArgument()
    if not self.supports_repository_notification():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # per contract this surfaces as OperationFailed upstream
        raise
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.RepositoryNotificationSession(
            repository_receiver, proxy, runtime=self._runtime)
    except AttributeError:
        raise
    return session
Gets the notification session for subscribing to changes to a repository. arg: repository_receiver (osid.repository.RepositoryReceiver): the notification callback arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.RepositoryNotificationSession) - a RepositoryNotificationSession raise: NullArgument - repository_receiver is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_notification() is false compliance: optional - This method must be implemented if supports_repository_notification() is true.
def avl_split_first(root):
    """Remove and return the minimum element from an AVL tree.

    Args:
        root: root node of the tree, or None for an empty tree.

    Returns:
        tuple: (new_root, first_node) where first_node holds the minimum.

    Raises:
        IndexError: if the tree is empty.

    Complexity: O(log(n)) = O(height(root)).
    """
    if root is None:
        # Bug fix: this function removes the *minimum* element, but the
        # original message said "maximum".
        raise IndexError('Empty tree has no minimum element')
    root, left, right = avl_release_kids(root)
    if left is None:
        # No left subtree: the root itself is the minimum.
        new_root, first_node = right, root
    else:
        # Recurse down the left spine, then re-join the remaining pieces.
        new_left, first_node = avl_split_first(left)
        new_root = avl_join(new_left, right, root)
    return (new_root, first_node)
Removes the minimum element from the tree Returns: tuple: new_root, first_node O(log(n)) = O(height(root))
def _next_ontology(self): currentfile = self.current['file'] try: idx = self.all_ontologies.index(currentfile) return self.all_ontologies[idx+1] except: return self.all_ontologies[0]
Dynamically retrieves the next ontology in the list
def set_reload_on_exception_params(self, do_reload=None, etype=None, evalue=None, erepr=None):
    """Configure worker reload-on-exception behaviour.

    :param bool do_reload: reload a worker whenever an exception is raised.
    :param str etype: reload when this exception type is raised.
    :param str evalue: reload when this exception value is raised.
    :param str erepr: reload when this exception type+value repr is raised.
    """
    self._set('reload-on-exception', do_reload, cast=bool)
    for option, value in (('reload-on-exception-type', etype),
                          ('reload-on-exception-value', evalue),
                          ('reload-on-exception-repr', erepr)):
        self._set(option, value)
    return self._section
Sets workers reload on exceptions parameters. :param bool do_reload: Reload a worker when an exception is raised. :param str etype: Reload a worker when a specific exception type is raised. :param str evalue: Reload a worker when a specific exception value is raised. :param str erepr: Reload a worker when a specific exception type+value (language-specific) is raised.
def cause_effect_info(self, mechanism, purview):
    """Return the cause-effect information for a mechanism over a purview.

    Defined as the minimum of the cause information and the effect
    information.
    """
    cause = self.cause_info(mechanism, purview)
    effect = self.effect_info(mechanism, purview)
    return min(cause, effect)
Return the cause-effect information for a mechanism over a purview. This is the minimum of the cause and effect information.
def delete_device(self, device_id):
    """Delete a device from the catalog.

    :param str device_id: ID of the device in the catalog to delete (required).
    :return: void
    """
    return self._get_api(device_directory.DefaultApi).device_destroy(id=device_id)
Delete device from catalog. :param str device_id: ID of device in catalog to delete (Required) :return: void
def set_cursor(self, x, y):
    """Make the cursor visible and move it to the desired position.

    :param x: X (column) position
    :param y: Y (row) position
    """
    curses.curs_set(1)
    # curses addresses cells as (row, column), hence the swapped order.
    self.screen.move(y, x)
Sets the cursor to the desired position. :param x: X position :param y: Y position
def add(self, *tokens: str) -> None:
    """Add new tokens to the list, skipping empties and duplicates.

    When the owner is a WdomElement, notify it of the additions via a
    JavaScript ``addClass`` call.
    """
    from wdom.web_node import WdomElement
    added = []
    for tok in tokens:
        self._validate_token(tok)
        if tok and tok not in self:
            self._list.append(tok)
            added.append(tok)
    if added and isinstance(self._owner, WdomElement):
        self._owner.js_exec('addClass', added)
Add new tokens to list.
def fit(self, train_events, test_events, n_epoch=1):
    """Batch-train the model, then incrementally update it with the test
    events.

    Args:
        train_events (list of Event): positive training events.
        test_events (list of Event): test events.
        n_epoch (int): number of epochs for the batch training.
    """
    for e in train_events:
        self.__validate(e)
        # Record training items per user so they are treated as known.
        self.rec.users[e.user.index]['known_items'].add(e.item.index)
        self.item_buffer.append(e.item.index)
    for e in test_events:
        self.__validate(e)
        self.item_buffer.append(e.item.index)
    self.__batch_update(train_events, test_events, n_epoch)
    # After batch training, fold test events into the model incrementally.
    for e in test_events:
        self.rec.users[e.user.index]['known_items'].add(e.item.index)
        self.rec.update(e)
Train a model using the first 30% positive events to avoid cold-start. Evaluation of this batch training is done by using the next 20% positive events. After the batch SGD training, the models are incrementally updated by using the 20% test events. Args: train_events (list of Event): Positive training events (0-30%). test_events (list of Event): Test events (30-50%). n_epoch (int): Number of epochs for the batch training.
def caution_title_header_element(feature, parent):
    """Retrieve the capitalized caution title header string from definitions."""
    _ = feature, parent  # unused, required by the caller's signature
    return caution_title_header['string_format'].capitalize()
Retrieve caution title header string from definitions.
def _eval(self, node): try: handler = self.nodes[type(node)] except KeyError: raise ValueError("Sorry, {0} is not available in this evaluator".format(type(node).__name__)) return handler(node)
Evaluate a node :param node: Node to eval :return: Result of node
def get_normed_x(self, rows=None, cols=None):
    """Return the normalized input data for the requested rows/columns.

    @ In, rows, non-negative row indices (int or iterable); all rows when None
    @ In, cols, non-negative column indices; all columns when None
    @ Out, matrix of normalized values restricted to the given rows/columns
    """
    if rows is None:
        rows = list(range(0, self.get_sample_size()))
    if cols is None:
        cols = list(range(0, self.get_dimensionality()))
    if not hasattr(rows, "__iter__"):
        rows = [rows]
    # De-duplicate and sort the row selection before slicing.
    rows = sorted(list(set(rows)))
    subset = self.Xnorm[rows, :]
    return subset[:, cols]
Returns the normalized input data requested by the user @ In, rows, a list of non-negative integers specifying the row indices to return @ In, cols, a list of non-negative integers specifying the column indices to return @ Out, a matrix of floating point values specifying the normalized data values used in internal computations filtered by the three input parameters.
def get_session_class(cls, interface_type, resource_class):
    """Return the registered session class for the pair.

    :type interface_type: constants.InterfaceType
    :type resource_class: str
    :return: Session
    :raises ValueError: if no class is registered for the pair
    """
    key = (interface_type, resource_class)
    try:
        return cls._session_classes[key]
    except KeyError:
        raise ValueError('No class registered for %s, %s' % key)
Return the session class for a given interface type and resource class. :type interface_type: constants.InterfaceType :type resource_class: str :return: Session
def view_rect(self) -> QRectF:
    """Return the boundaries of the view in scene coordinates."""
    vp = self.viewport()
    # Map the viewport's opposite corners into the scene.
    return QRectF(self.mapToScene(0, 0),
                  self.mapToScene(vp.width() - 1, vp.height() - 1))
Return the boundaries of the view in scene coordinates
def start(self, reloading=False):
    """Called when the module is loaded.

    Registers the controller as a listener for every event in
    ``self.event_handlers``.

    :param reloading: True when invoked due to a module reload.
    """
    for evt in self.event_handlers:
        self.controller.listen(evt)
Called when the module is loaded. If the load is due to a reload of the module, then the 'reloading' argument will be set to True. By default, this method calls the controller's listen() for each event in the self.event_handlers dict.
def lacp_timeout(self, **kwargs):
    """Set LACP timeout on an interface.

    Args:
        int_type (str): interface type (gigabitethernet,
            tengigabitethernet, etc).
        name (str): interface name (1/0/5, 1/0/10, etc).
        timeout (str): timeout length, 'short' or 'long'.
        callback (function): called with the ElementTree `config` on
            completion; defaults to self._callback.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `timeout` is not specified.
        ValueError: if `int_type`, `name`, or `timeout` is not valid.
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    timeout = kwargs.pop('timeout')
    callback = kwargs.pop('callback', self._callback)
    int_types = [
        'gigabitethernet',
        'tengigabitethernet',
        'fortygigabitethernet',
        'hundredgigabitethernet'
    ]
    if int_type not in int_types:
        raise ValueError("Incorrect int_type value.")
    valid_timeouts = ['long', 'short']
    if timeout not in valid_timeouts:
        raise ValueError("Incorrect timeout value")
    timeout_args = dict(name=name, timeout=timeout)
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError("Incorrect name value.")
    # Dispatch to the per-interface-type config builder by name.
    config = getattr(
        self._interface,
        'interface_%s_lacp_timeout' % int_type
    )(**timeout_args)
    return callback(config)
Set lacp timeout. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, etc) timeout (str): Timeout length. (short, long) name (str): Name of interface. (1/0/5, 1/0/10, etc) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name`, or `timeout` is not specified. ValueError: if `int_type`, `name`, or `timeout is not valid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> int_type = 'tengigabitethernet' >>> name = '225/0/39' >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.channel_group(name=name, ... int_type=int_type, port_int='1', ... channel_type='standard', mode='active') ... output = dev.interface.lacp_timeout(name=name, ... int_type=int_type, timeout='long') ... dev.interface.lacp_timeout() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
def println(self, text=""):
    """Print *text* to the device followed by a newline, word-wrapping
    when enabled.

    :param text: the text to print.
    :type text: str
    """
    if self.word_wrap:
        # Wrapping is computed on the visible characters only: ANSI
        # directives are extracted first, then replayed per wrapped line.
        directives = ansi_color.find_directives(text, self)
        clean_text = ansi_color.strip_ansi_codes(text)
        clean_lines = self.tw.wrap(clean_text)
        index = 0
        for line in clean_lines:
            line_length = len(line)
            y = 0
            while y < line_length:
                method, args = directives[index]
                if method == self.putch:
                    # Only visible characters advance the column; other
                    # directives (presumably color changes) do not.
                    y += 1
                method(*args)
                index += 1
            self.newline()
    else:
        self.puts(text)
        self.newline()
Prints the supplied text to the device, scrolling where necessary. The text is always followed by a newline. :param text: The text to print. :type text: str
def predict_mappings(self, mappings):
    """Predict the remote ports a NAT will map local connections to.

    Requires the NAT type to have been determined beforehand; supports
    'preserving' and 'delta' mapping behaviour.
    """
    if self.nat_type not in self.predictable_nats:
        msg = "Can't predict mappings for non-predictable NAT type."
        raise Exception(msg)
    for mapping in mappings:
        mapping["bound"] = mapping["sock"].getsockname()[1]
        if self.nat_type == "preserving":
            # Preserving NATs reuse the source port verbatim.
            mapping["remote"] = mapping["source"]
        if self.nat_type == "delta":
            max_port = 65535
            remote = int(mapping["source"]) + self.delta
            # Wrap around the valid port range in either direction.
            if remote > max_port:
                remote -= max_port
            if remote < 0:
                remote = max_port - -remote
            if remote < 1 or remote > max_port:
                remote = 1
            mapping["remote"] = str(remote)
    return mappings
This function is used to predict the remote ports that a NAT will map a local connection to. It requires the NAT type to be determined before use. Current support for preserving and delta type mapping behaviour.
def refresh(self):
    """Refresh this automation's state from the API.

    :raises AbodeException: when the response id does not match this
        automation's id.
    """
    url = CONST.AUTOMATION_ID_URL
    url = url.replace(
        '$AUTOMATIONID$', self.automation_id)
    response = self._abode.send_request(method="get", url=url)
    response_object = json.loads(response.text)
    # The endpoint may return a one-element list/tuple; unwrap it.
    if isinstance(response_object, (tuple, list)):
        response_object = response_object[0]
    if str(response_object['id']) != self.automation_id:
        raise AbodeException((ERROR.INVALID_AUTOMATION_REFRESH_RESPONSE))
    self.update(response_object)
Refresh the automation.
def parse_poi_query(north, south, east, west, amenities=None, timeout=180, maxsize=''):
    """Build the Overpass QL query for POIs inside a bounding box.

    Parameters
    ----------
    north, south, east, west : float
        Bounding-box coordinates of the search area.
    amenities : list
        Amenity values to match; all amenities when None/empty.
    timeout : int
        Timeout for the API request.
    maxsize : str
        Optional Overpass maxsize directive.

    Returns
    -------
    str
        The formatted Overpass QL query string.
    """
    selector = '"amenity"~"{amenities}"' if amenities else '"amenity"'
    bbox = '({south:.6f},{west:.6f},{north:.6f},{east:.6f})'
    # One union member per element kind, each with its own recursion.
    union = ''.join('({elem}[{sel}]{bbox};(._;>;););'
                    .format(elem=elem, sel=selector, bbox=bbox)
                    for elem in ('node', 'way', 'relation'))
    template = '[out:json][timeout:{timeout}]{maxsize};(' + union + ');out;'
    values = dict(north=north, south=south, east=east, west=west,
                  timeout=timeout, maxsize=maxsize)
    if amenities:
        values['amenities'] = "|".join(amenities)
    return template.format(**values)
Parse the Overpass QL query based on the list of amenities. Parameters ---------- north : float Northernmost coordinate from bounding box of the search area. south : float Southernmost coordinate from bounding box of the search area. east : float Easternmost coordinate from bounding box of the search area. west : float Westernmost coordinate of the bounding box of the search area. amenities : list List of amenities that will be used for finding the POIs from the selected area. timeout : int Timeout for the API request.
def mkdir(self, paths, create_parent=False, mode=0o755):
    """Create directories.

    :param paths: paths to create
    :type paths: list of strings
    :param create_parent: also create the parent directories
    :type create_parent: boolean
    :param mode: mode the directory should be created with
    :type mode: int
    :returns: a generator that yields a result dictionary per path
    :raises InvalidInputException: when paths is not a non-empty list
    """
    if not isinstance(paths, list):
        raise InvalidInputException("Paths should be a list")
    if not paths:
        raise InvalidInputException("mkdirs: no path given")
    for path in paths:
        if not path.startswith("/"):
            # Relative paths are resolved against the user's directory.
            path = self._join_user_path(path)
        fileinfo = self._get_file_info(path)
        if not fileinfo:
            try:
                request = client_proto.MkdirsRequestProto()
                request.src = path
                request.masked.perm = mode
                request.createParent = create_parent
                response = self.service.mkdirs(request)
                yield {"path": path, "result": response.result}
            except RequestError as e:
                yield {"path": path, "result": False, "error": str(e)}
        else:
            # Path already exists: report failure without calling the service.
            yield {"path": path, "result": False,
                   "error": "mkdir: `%s': File exists" % path}
Create directories. :param paths: Paths to create :type paths: list of strings :param create_parent: Also create the parent directories :type create_parent: boolean :param mode: Mode the directory should be created with :type mode: int :returns: a generator that yields dictionaries
def reprcall(name, args=(), kwargs=(), keywords='', sep=', ', argfilter=repr):
    """Format a function call for display.

    Renders ``name(args, kwargs, **keywords)``; the separator between the
    positional and keyword parts is emitted only when both are present.
    """
    if keywords:
        prefix = ', ' if (args or kwargs) else ''
        keywords = prefix + '**' + keywords
    argfilter = argfilter or repr
    inner_sep = sep if (args and kwargs) else ""
    return "{name}({args}{sep}{kwargs}{keywords})".format(
        name=name,
        args=reprargs(args, filter=argfilter),
        sep=inner_sep,
        kwargs=reprkwargs(kwargs, sep),
        keywords=keywords or '')
Format a function call for display.
def publish(self):
    """Publish new events to the subscribers until POISON_PILL arrives.

    Generator-based coroutine: ``yield from`` awaits the event source and
    each websocket send.
    """
    while True:
        event = yield from self.event_source.get()
        str_buffer = []
        if event == POISON_PILL:
            return
        if isinstance(event, str):
            str_buffer.append(event)
        elif event.type == EventTypes.BLOCK_VALID:
            # One JSON message per eventified block entry.
            str_buffer = map(json.dumps, eventify_block(event.data))
        for str_item in str_buffer:
            for _, websocket in self.subscribers.items():
                yield from websocket.send_str(str_item)
Publish new events to the subscribers.
def get_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'):
    """Get information about one attribute of a snapshot.

    Only one attribute can be specified per call.

    :type snapshot_id: str
    :param snapshot_id: The ID of the snapshot.
    :type attribute: str
    :param attribute: The requested attribute; valid value:
        * createVolumePermission
    :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
    :return: The requested snapshot attribute.
    """
    params = dict(Attribute=attribute)
    if snapshot_id:
        params['SnapshotId'] = snapshot_id
    return self.get_object('DescribeSnapshotAttribute', params,
                           SnapshotAttribute, verb='POST')
Get information about an attribute of a snapshot. Only one attribute can be specified per call. :type snapshot_id: str :param snapshot_id: The ID of the snapshot. :type attribute: str :param attribute: The requested attribute. Valid values are: * createVolumePermission :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute` :return: The requested Snapshot attribute
def retrieve_file_from_url(url):
    """Retrieve a file from a URL.

    Args:
        url: The URL to retrieve the file from.

    Returns:
        The absolute path of the downloaded file.

    Raises CLIError when the download fails or the downloaded content
    starts with a 3-digit (HTTP status) code.
    """
    try:
        alias_source, _ = urlretrieve(url)
        with open(alias_source, 'r') as f:
            content = f.read()
        if content[:3].isdigit():
            raise CLIError(ALIAS_FILE_URL_ERROR.format(url, content.strip()))
    except CLIError:
        raise
    except Exception as exception:
        raise CLIError(ALIAS_FILE_URL_ERROR.format(url, exception))
    return alias_source
Retrieve a file from an URL Args: url: The URL to retrieve the file from. Returns: The absolute path of the downloaded file.
def _address_content(self, x):
    """Address the memory based on content similarity.

    Args:
        x: a tensor in the shape of [batch_size, length, depth].

    Returns:
        the logits for each memory entry [batch_size, length, memory_size].
    """
    mem_keys = tf.layers.dense(self.mem_vals, self.key_depth,
                               bias_initializer=tf.constant_initializer(1.0),
                               name="mem_key")
    mem_query = tf.layers.dense(x, self.key_depth,
                                bias_initializer=tf.constant_initializer(1.0),
                                name="mem_query")
    # Cosine similarity = dot product / (norm product); the epsilon guards
    # against division by zero.
    norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys),
                     transpose_b=True)
    dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)
    cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist")
    access_logits = self.sharpen_factor * cos_dist
    return access_logits
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
def get_cpu_state(self):
    """Retrieve the CPU state from the client.

    :returns: human-readable status string looked up in cpu_statuses
    :raises Snap7Exception: when the returned state code is unknown
    """
    state = c_int(0)
    self.library.Cli_GetPlcStatus(self.pointer, byref(state))
    try:
        status_string = cpu_statuses[state.value]
    except KeyError:
        status_string = None
    if not status_string:
        raise Snap7Exception("The cpu state (%s) is invalid" % state.value)
    logger.debug("CPU state is %s" % status_string)
    return status_string
Retrieves CPU state from client
def intersect(a, b):
    """Check if two rectangles intersect.

    Rectangles are indexed via the module-level x0/y0/x1/y1 constants; a
    degenerate rectangle (zero width or height) never intersects.
    """
    for rect in (a, b):
        if rect[x0] == rect[x1] or rect[y0] == rect[y1]:
            return False
    return (a[x0] <= b[x1] and b[x0] <= a[x1] and
            a[y0] <= b[y1] and b[y0] <= a[y1])
Check if two rectangles intersect
def check_enable_mode(self, check_string=""):
    """Check if in enable mode. Return boolean.

    :param check_string: Identification of privilege mode from device
    :type check_string: str
    """
    self.write_channel(self.RETURN)
    return check_string in self.read_until_prompt()
Check if in enable mode. Return boolean. :param check_string: Identification of privilege mode from device :type check_string: str
def noninteractive_changeset_update(self, fqn, template, old_parameters,
                                    parameters, stack_policy, tags,
                                    **kwargs):
    """Update a CloudFormation stack using a change set, non-interactively.

    Required for stacks with a defined Transform (i.e. SAM), as the
    default update_stack API cannot be used with them.

    Args:
        fqn (str): fully qualified name of the CloudFormation stack.
        template: Template object used when updating the stack.
        old_parameters (list): parameter list of the existing stack
            (not used by this method; kept for interface parity).
        parameters (list): parameter list to apply to the stack.
        stack_policy: Template object representing a stack policy.
        tags (list): tags to apply to the stack.
    """
    logger.debug("Using noninterative changeset provider mode "
                 "for %s.", fqn)
    _changes, change_set_id = create_change_set(
        self.cloudformation, fqn, template, parameters, tags,
        'UPDATE', service_role=self.service_role, **kwargs
    )
    self.deal_with_changeset_stack_policy(fqn, stack_policy)
    self.cloudformation.execute_change_set(
        ChangeSetName=change_set_id,
    )
Update a Cloudformation stack using a change set. This is required for stacks with a defined Transform (i.e. SAM), as the default update_stack API cannot be used with them. Args: fqn (str): The fully qualified name of the Cloudformation stack. template (:class:`stacker.providers.base.Template`): A Template object to use when updating the stack. old_parameters (list): A list of dictionaries that defines the parameter list on the existing Cloudformation stack. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack.
def female_vulnerability_section_header_element(feature, parent):
    """Retrieve the capitalized female vulnerability section header string."""
    _ = feature, parent  # unused, required by the caller's signature
    return female_vulnerability_section_header['string_format'].capitalize()
Retrieve female vulnerability section header string from definitions.
def get_network_id(self, is_full: bool = False) -> int:
    """Get the network id of the current network.

    :param is_full: return the raw JSON-RPC response when True.
    :return: the network id of the current network.
    """
    payload = self.generate_json_rpc_payload(RpcMethod.GET_NETWORK_ID)
    response = self.__post(self.__url, payload)
    return response if is_full else response['result']
This interface is used to get the network id of current network. Return: the network id of current network.
def calculated_intervals(self):
    """Return the calculated intervals, fetching them from the database
    when no cached value is present.

    :return: The calculated intervals
    """
    if self._calculated_intervals is not None:
        return self._calculated_intervals
    logging.debug("get calculated intervals")
    self.load()
    return self.mongo_model.get_calculated_intervals()
Gets the calculated intervals from the database :return: The calculated intervals
def _get_bandfilenames(self):
    """Resolve the instrument RSR filename for each band.

    Fills self.filenames[band] from the configured options for this
    platform/instrument pair, warning when a file is missing on disk.
    """
    for band in self.bandnames:
        LOG.debug("Band = %s", str(band))
        self.filenames[band] = os.path.join(
            self.path,
            self.options[self.platform_name + '-' + self.instrument][band])
        LOG.debug(self.filenames[band])
        if not os.path.exists(self.filenames[band]):
            LOG.warning("Couldn't find an existing file for this band: %s",
                        str(self.filenames[band]))
Get the instrument rsr filenames
async def get_pinstate_report(self, command):
    """Retrieve a Firmata pin-state report for a pin and send the reply
    over the websocket.

    See: http://firmata.org/wiki/Protocol#Pin_State_Query

    :param command: {"method": "get_pin_state", "params": [PIN]}
    :returns: sends {"method": "pin_state_reply", "params": value}
        (or "Unknown" when no state is available) to the websocket
    """
    pin = int(command[0])
    value = await self.core.get_pin_state(pin)
    if value:
        reply = json.dumps({"method": "pin_state_reply", "params": value})
    else:
        reply = json.dumps({"method": "pin_state_reply", "params": "Unknown"})
    await self.websocket.send(reply)
This method retrieves a Firmata pin_state report for a pin. See: http://firmata.org/wiki/Protocol#Pin_State_Query :param command: {"method": "get_pin_state", "params": [PIN]} :returns: {"method": "pin_state_reply", "params": value}
def remember_identity(self, subject, authc_token, account_id):
    """Remember a subject-unique identity for retrieval later.

    Resolves the identifying attributes (identifier-else-account logic),
    encrypts them, and delegates storage to remember_encrypted_identity.

    :param subject: subject whose identity is being remembered
    :param authc_token: ignored by this implementation
    :param account_id: account id of the authenticated account
    :raises AttributeError: when no identifier source is available
    """
    try:
        identifiers = self.get_identity_to_remember(subject, account_id)
    except AttributeError:
        raise AttributeError("Neither account_id nor identifier arguments passed")
    encrypted = self.convert_identifiers_to_bytes(identifiers)
    self.remember_encrypted_identity(subject, encrypted)
Yosai consolidates rememberIdentity, an overloaded method in java, to a method that will use an identifier-else-account logic. Remembers a subject-unique identity for retrieval later. This implementation first resolves the exact identifying attributes to remember. It then remembers these identifying attributes by calling remember_identity(Subject, IdentifierCollection) :param subject: the subject for which the identifying attributes are being remembered :param authc_token: ignored in the AbstractRememberMeManager :param account_id: the account id of authenticated account
def version(msg):
    """ADS-B Version.

    Args:
        msg (string): 28 bytes hexadecimal message string, TC = 31

    Returns:
        int: version number

    Raises:
        RuntimeError: if the message is not a status operation message.
    """
    if typecode(msg) != 31:
        raise RuntimeError(
            "%s: Not a status operation message, expecting TC = 31" % msg)
    # The version number lives in bits 72..74 of the binary message.
    return common.bin2int(common.hex2bin(msg)[72:75])
ADS-B Version Args: msg (string): 28 bytes hexadecimal message string, TC = 31 Returns: int: version number
def setup_zmq(self):
    """Set up the PUSH socket that sends requests out to the workers and
    spawn the PULL loop that receives their responses.
    """
    self.context = zmq.Context()
    self.push = self.context.socket(zmq.PUSH)
    self.push_port = self.push.bind_to_random_port("tcp://%s" % self.host)
    # Run the PULL side in a green thread; sleep(0) yields so it starts.
    eventlet.spawn(self.zmq_pull)
    eventlet.sleep(0)
Set up a PUSH and a PULL socket. The PUSH socket will push out requests to the workers. The PULL socket will receive responses from the workers and reply through the server socket.
def count_channels(self):
    """Enable the 'merge' checkbox only when more than one channel is
    selected; otherwise uncheck and disable it.
    """
    merge = self.index['merge']
    if len(self.idx_chan.selectedItems()) > 1:
        if merge.isEnabled():
            # Already enabled: nothing to do.
            return
        else:
            merge.setEnabled(True)
    else:
        self.index['merge'].setCheckState(Qt.Unchecked)
        self.index['merge'].setEnabled(False)
If more than one channel selected, activate merge checkbox.
def cmd_wp_undo(self):
    """Undo the last waypoint move or removal."""
    if self.undo_wp_idx == -1 or self.undo_wp is None:
        print("No undo information")
        return
    wp = self.undo_wp
    if self.undo_type == 'move':
        wp.target_system = self.target_system
        wp.target_component = self.target_component
        self.loading_waypoints = True
        self.loading_waypoint_lasttime = time.time()
        # Rewrite just the single moved waypoint on the vehicle.
        self.master.mav.mission_write_partial_list_send(self.target_system,
                                                        self.target_component,
                                                        self.undo_wp_idx,
                                                        self.undo_wp_idx)
        self.wploader.set(wp, self.undo_wp_idx)
        print("Undid WP move")
    elif self.undo_type == 'remove':
        self.wploader.insert(self.undo_wp_idx, wp)
        # Re-inserting shifts later indices; fix_jumps presumably adjusts
        # jump targets accordingly — confirm against its implementation.
        self.fix_jumps(self.undo_wp_idx, 1)
        self.send_all_waypoints()
        print("Undid WP remove")
    else:
        print("bad undo type")
    # Undo information is single-use.
    self.undo_wp = None
    self.undo_wp_idx = -1
handle wp undo
def _is_lang_change(self, request): if 'lang' not in request.GET: return False return not any(request.path.endswith(url) for url in self.exempt_urls)
Return True if the lang param is present and URL isn't exempt.
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
    """Get all MFA devices associated with an account.

    :type user_name: string
    :param user_name: The username of the user

    :type marker: string
    :param marker: Pagination marker from a previous truncated response.

    :type max_items: int
    :param max_items: Maximum number of results to return per page.
    """
    params = {'UserName': user_name}
    for key, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            params[key] = value
    return self.get_response('ListMFADevices', params,
                             list_marker='MFADevices')
Get all MFA devices associated with an account. :type user_name: string :param user_name: The username of the user :type marker: string :param marker: Use this only when paginating results and only in follow-up request after you've received a response where the results are truncated. Set this to the value of the Marker element in the response you just received. :type max_items: int :param max_items: Use this only when paginating results to indicate the maximum number of groups you want in the response.
def set_major(self):
    """Increment the project's major version number (minor/patch reset to 0)."""
    old_version = self.get_version()
    major = int(old_version.split('.', 5)[0])
    self.set_version(old_version, '{0}.0.0'.format(major + 1))
Increment the major number of project
def register_binary_type(content_type, dumper, loader):
    """Register handling for a binary content type.

    :param str content_type: content type to register the hooks for
    :param dumper: encodes a dictionary into bytes; stored as the
        handler's ``dict_to_bytes`` hook
    :param loader: decodes bytes into a dictionary; stored as the
        handler's ``bytes_to_dict`` hook
    """
    # Normalize: parse and strip parameters so the registry key is canonical.
    content_type = headers.parse_content_type(content_type)
    content_type.parameters.clear()
    key = str(content_type)
    _content_types[key] = content_type
    handler = _content_handlers.setdefault(key, _ContentHandler(key))
    handler.dict_to_bytes = dumper
    handler.bytes_to_dict = loader
Register handling for a binary content type. :param str content_type: content type to register the hooks for :param dumper: called to encode a dictionary into a byte string. Calling convention: ``dumper(obj_dict) -> bytes``. :param loader: called to decode a byte string into a dictionary. Calling convention: ``loader(obj_bytes) -> dict``
def execute(self):
    """Execute the actions necessary to perform a `molecule destroy`.

    Skipped when '--destroy=never' was requested or when instances are
    delegated and unmanaged.

    :return: None
    """
    self.print_info()
    if self._config.command_args.get('destroy') == 'never':
        msg = "Skipping, '--destroy=never' requested."
        LOG.warn(msg)
        return
    if self._config.driver.delegated and not self._config.driver.managed:
        msg = 'Skipping, instances are delegated.'
        LOG.warn(msg)
        return
    self._config.provisioner.destroy()
    self._config.state.reset()
Execute the actions necessary to perform a `molecule destroy` and returns None. :return: None
async def get_current_position(self, refresh=True) -> dict:
    """Return the current shade position.

    :param refresh: query the hub for the latest info first when True.
    :return: dictionary with position data.
    """
    if refresh:
        await self.refresh()
    return self._raw_data.get(ATTR_POSITION_DATA)
Return the current shade position. :param refresh: If True it queries the hub for the latest info. :return: Dictionary with position data.
def types(self):
    """Return symbol instances for type declarations in the current scope."""
    scope = self[self.current_scope]
    return [entry for entry in scope.values()
            if isinstance(entry, symbols.TYPE)]
Returns symbol instances corresponding to type declarations within the current scope.
def set_scenemode(self, scenemode='auto'):
    """Set the video scene mode.

    Returns the result of change_setting (a coroutine), or False when the
    requested mode is not in the camera's available settings.
    """
    if scenemode in self.available_settings['scenemode']:
        return self.change_setting('scenemode', scenemode)
    _LOGGER.debug('%s is not a valid scenemode', scenemode)
    return False
Set the video scene mode. Return a coroutine.
def get_by_id(cls, record_id, execute=True):
    """Return a single instance of the model queried by ID.

    Args:
        record_id (int): the ID to query on.

    Keyword Args:
        execute (bool, optional): execute the query (default) or return
            the query object for further manipulation.

    Returns:
        cls | peewee.SelectQuery: the record when ``execute`` is True,
        otherwise the unexecuted query.

    Raises:
        peewee.DoesNotExist: if no record with that ID exists.
    """
    query = cls.base_query().where(cls.id == record_id)
    return query.get() if execute else query
Return a single instance of the model queried by ID. Args: record_id (int): Integer representation of the ID to query on. Keyword Args: execute (bool, optional): Should this method execute the query or return a query object for further manipulation? Returns: cls | :py:class:`peewee.SelectQuery`: If ``execute`` is ``True``, the query is executed, otherwise a query is returned. Raises: :py:class:`peewee.DoesNotExist`: Raised if a record with that ID doesn't exist.
def exitContainer(self):
    """Remove the current query container.

    Pops the top compound-stack entry, propagates the current container's
    query to it, and slides back to the previous view.  No-op when the
    stack is empty.
    """
    if not self._compoundStack:
        return
    entry = self._compoundStack.pop()
    entry.setQuery(self.currentContainer().query())
    self.slideInPrev()
Removes the current query container.
def add_ops(op_classes):
    """Class decorator adding default implementations of ops.

    ``op_classes`` maps an attribute-name prefix to an op class; for each
    op listed in ``<prefix>_ops`` whose entry in
    ``<prefix>_op_nodes_map`` is not None, a ``visit_<node>`` method
    built by _op_maker is attached to the class.
    """
    def f(cls):
        for prefix, op_class in op_classes.items():
            ops = getattr(cls, '{name}_ops'.format(name=prefix))
            node_map = getattr(cls, '{name}_op_nodes_map'.format(name=prefix))
            for op in ops:
                node = node_map[op]
                if node is None:
                    continue
                setattr(cls, 'visit_{node}'.format(node=node),
                        _op_maker(op_class, op))
        return cls
    return f
Decorator to add default implementation of ops.
def scan(self, callback=None):
    """Scan the bus and call *callback* when a new module is discovered.

    :return: None
    """
    def scan_finished():
        """Runs after the address sweep: waits for stragglers, then loads
        each known module and fires *callback* once all have loaded.

        NOTE(review): the blocking time.sleep(3) runs in whatever thread
        invokes this callback — confirm that is acceptable here.
        """
        time.sleep(3)
        logging.info('Scan finished')
        self._nb_of_modules_loaded = 0

        def module_loaded():
            # Fire the user callback once every module reported loaded.
            self._nb_of_modules_loaded += 1
            if self._nb_of_modules_loaded >= len(self._modules):
                callback()

        for module in self._modules:
            self._modules[module].load(module_loaded)

    # Probe every possible address; attach scan_finished to the last send.
    for address in range(0, 256):
        message = velbus.ModuleTypeRequestMessage(address)
        if address == 255:
            self.send(message, scan_finished)
        else:
            self.send(message)
Scan the bus and call the callback when a new module is discovered :return: None
def info(self):
    """Retrieve information about the dataset.

    Returns a 5-element tuple holding:
      - dataset name
      - dataset rank (number of dimensions)
      - dataset shape (list of dimension lengths; first value is the
        current length when the first dimension is unlimited)
      - data type (one of the SDC.xxx values)
      - number of attributes defined for the dataset

    C library equivalent: SDgetinfo
    """
    buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
    status, sds_name, rank, data_type, n_attrs = \
        _C.SDgetinfo(self._id, buf)
    _checkErr('info', status, "cannot execute")
    # Only the first `rank` entries of the dimension buffer are valid.
    dim_sizes = _array_to_ret(buf, rank)
    return sds_name, rank, dim_sizes, data_type, n_attrs
Retrieves information about the dataset. Args:: no argument Returns:: 5-element tuple holding: - dataset name - dataset rank (number of dimensions) - dataset shape, that is a list giving the length of each dataset dimension; if the first dimension is unlimited, then the first value of the list gives the current length of the unlimited dimension - data type (one of the SDC.xxx values) - number of attributes defined for the dataset C library equivalent : SDgetinfo
async def _get_async(self, url, session): data = None async with session.get(url) as resp: if resp.status == 200: data = await resp.json() return data
Asynchronous internal method used for GET requests Args: url (str): URL to fetch session (obj): aiohttp client session for async loop Returns: data (obj): Individual URL request's response corountine
def fix_e711(self, result):
    """Fix comparison with None (E711): rewrite ``== None`` to ``is None``
    and ``!= None`` to ``is not None`` on the flagged line in-place.

    Returns [] (meaning: no modification) when the flagged span is not a
    rewritable comparison with None.
    """
    (line_index, offset, target) = get_index_offset_contents(result,
                                                             self.source)
    # The comparison operator is the two characters at the flagged offset.
    right_offset = offset + 2
    if right_offset >= len(target):
        return []
    left = target[:offset].rstrip()
    center = target[offset:right_offset]
    right = target[right_offset:].lstrip()
    if not right.startswith('None'):
        return []
    if center.strip() == '==':
        new_center = 'is'
    elif center.strip() == '!=':
        new_center = 'is not'
    else:
        return []
    self.source[line_index] = ' '.join([left, new_center, right])
Fix comparison with None.
def listify(generator_func):
    """Decorator converting a generator function into a list-returning one.

    @listify
    def test():
        yield 1

    test()  # => [1]
    """
    def list_func(*args, **kwargs):
        # degenerate() presumably materializes the generator into a list —
        # confirm against its definition.
        return degenerate(generator_func(*args, **kwargs))
    return list_func
Converts generator functions into list returning functions. @listify def test(): yield 1 test() # => [1]
def get_output_mode(output, mode):
    """Resolve the transformation function from the output name and mode.

    :param output: output file name; its extension selects the mode when
        ``mode`` is 'auto'.
    :param mode: explicit mode name, or 'auto' to infer from ``output``.
    :return: function transforming the intermediary representation to the
        output.
    :raises ValueError: when an explicit mode is not supported.
    """
    if mode != 'auto':
        try:
            return switch_output_mode_auto[mode]
        except KeyError:
            # Bug fix: the unsupported mode name was never interpolated
            # into the error message (missing .format call).
            raise ValueError('Mode "{}" is not supported.'.format(mode))
    extension = output.split('.')[-1]
    try:
        return switch_output_mode[extension]
    except KeyError:
        # Unknown extension: fall back to the schema converter.
        return intermediary_to_schema
From the output name and the mode, returns the function that will transform the intermediary representation to the output.
def call_multiple_modules(module_gen):
    """Call each module described by the iterator *module_gen*.

    Each item is an argv-style sequence whose first element is either a
    module name (run via runpy.run_module) or a file path
    (runpy.run_path); sys.argv is swapped in for the duration of the run.
    """
    for argv in module_gen:
        target = argv[0]
        with replace_sys_args(argv):
            if re.match(VALID_PACKAGE_RE, target):
                runpy.run_module(target, run_name='__main__')
            else:
                runpy.run_path(target, run_name='__main__')
Call each module module_gen should be a iterator
def abort(code, error=None, message=None):
    """Abort with a suitable error response.

    Args:
        code (int): status code
        error (str | flask.Response): error symbol or response to send
        message (str): error message
    """
    if error is None:
        flask_abort(code)
    elif isinstance(error, Response):
        error.status_code = code
        flask_abort(code, response=error)
    else:
        payload = {"status": code, "error": error, "message": message}
        flask_abort(code, response=export(payload, code))
Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message
def annotation_spec_path(cls, project, location, dataset, annotation_spec):
    """Return a fully-qualified annotation_spec resource string."""
    template = (
        "projects/{project}/locations/{location}/datasets/{dataset}"
        "/annotationSpecs/{annotation_spec}"
    )
    return google.api_core.path_template.expand(
        template,
        project=project,
        location=location,
        dataset=dataset,
        annotation_spec=annotation_spec,
    )
Return a fully-qualified annotation_spec string.
def get_by_instance(self, instance):
    """Return the preference registry whose 'instance' model matches
    *instance*, or ``None`` when no registered model matches.
    """
    for model, registry in self.items():
        try:
            expected_class = model._meta.get_field('instance').remote_field.model
        except FieldDoesNotExist:
            # This model has no 'instance' field; it cannot match.
            continue
        if isinstance(instance, expected_class):
            return registry
    return None
Return a preference registry using a model instance
def _addPeptide(self, sequence, proteinId, digestInfo):
    """Add a peptide to the protein database and link it to a protein.

    :param sequence: str, amino acid sequence
    :param proteinId: str, protein identifier
    :param digestInfo: dict describing the in-silico digest; must contain
        the keys 'missedCleavage', 'startPos' and 'endPos'
    """
    stdSequence = self.getStdSequence(sequence)
    if stdSequence not in self.peptides:
        self.peptides[stdSequence] = PeptideEntry(
            stdSequence, mc=digestInfo['missedCleavage']
        )
    # Alias the raw sequence to the SAME PeptideEntry object as its
    # standardized form, so both spellings resolve to one shared entry.
    if sequence not in self.peptides:
        self.peptides[sequence] = self.peptides[stdSequence]
    # Record the protein link and its position only once per protein.
    if proteinId not in self.peptides[stdSequence].proteins:
        self.peptides[stdSequence].proteins.add(proteinId)
        self.peptides[stdSequence].proteinPositions[proteinId] = (
            digestInfo['startPos'], digestInfo['endPos']
        )
    self.proteins[proteinId].peptides.add(sequence)
Add a peptide to the protein database. :param sequence: str, amino acid sequence :param proteinId: str, proteinId :param digestInfo: dict, contains information about the in silico digest must contain the keys 'missedCleavage', 'startPos' and 'endPos'
def get(self, user_id):
    """Return a specific user together with all available roles."""
    user = db.User.find_one(User.user_id == user_id)
    roles = db.Role.all()
    if not user:
        return self.make_response(
            'Unable to find the user requested, might have been removed',
            HTTP.NOT_FOUND)
    payload = {
        'user': user.to_json(),
        'roles': roles
    }
    return self.make_response(payload, HTTP.OK)
Returns a specific user
def api_key_from_file(url):
    """Look up an API key for *url* in ~/.config/python-bugzilla/bugzillarc.

    Returns the key string, or ``None`` when no matching entry exists.
    """
    config_path = os.path.expanduser('~/.config/python-bugzilla/bugzillarc')
    parser = SafeConfigParser()
    parser.read(config_path)
    # bugzillarc section names are the Bugzilla host names.
    host = urlparse(url)[1]
    if host in parser.sections() and parser.has_option(host, 'api_key'):
        return parser.get(host, 'api_key')
    return None
Check bugzillarc for an API key for this Bugzilla URL.
def start_end_from_segments(segment_file):
    """Return the start and end time arrays from a LIGO_LW segment file.

    Nanosecond columns are folded into the seconds as fractional values.

    Parameters
    ----------
    segment_file: xml segment file

    Returns
    -------
    start: numpy.ndarray
    end: numpy.ndarray
    """
    # Register a content handler so the segment tables can be parsed.
    from glue.ligolw.ligolw import LIGOLWContentHandler as h; lsctables.use_in(h)
    indoc = ligolw_utils.load_filename(segment_file, False, contenthandler=h)
    segment_table = table.get_table(indoc, lsctables.SegmentTable.tableName)
    start = numpy.array(segment_table.getColumnByName('start_time'))
    start_ns = numpy.array(segment_table.getColumnByName('start_time_ns'))
    end = numpy.array(segment_table.getColumnByName('end_time'))
    end_ns = numpy.array(segment_table.getColumnByName('end_time_ns'))
    # Combine integer seconds with nanoseconds into float seconds.
    return start + start_ns * 1e-9, end + end_ns * 1e-9
Return the start and end time arrays from a segment file. Parameters ---------- segment_file: xml segment file Returns ------- start: numpy.ndarray end: numpy.ndarray
def load_txt_to_sql(tbl_name, src_file_and_path, src_file, op_folder):
    """Generate SQL*Loader artifacts for loading a text file into a table.

    Writes a CREATE script, a BACKOUT (drop) script and a control file
    into *op_folder* (or the current directory when it is empty). Column
    definitions are derived from *src_file*.
    """
    pth = op_folder + os.sep if op_folder != '' else ''
    create_script = pth + 'CREATE_' + tbl_name + '.SQL'
    backout_script = pth + 'BACKOUT_' + tbl_name + '.SQL'
    control_file = pth + tbl_name + '.CTL'

    cols = read_csv_cols_to_table_cols(src_file)
    create_script_staging_table(create_script, tbl_name, cols)
    drop_statement = 'DROP TABLE ' + tbl_name + ' CASCADE CONSTRAINTS;\n'
    create_file(backout_script, drop_statement)
    create_CTL(control_file, tbl_name, cols, 'TRUNCATE')
Creates SQL*Loader artifacts (a CREATE script, a BACKOUT script and a control file) for loading a text file into a database table. Note that src_file is used to derive the column definitions.
def __calculate_always_decrease_rw_values(
        table_name, read_units, provisioned_reads,
        write_units, provisioned_writes):
    """Apply the always-decrease-rw-together policy.

    Decreases are only allowed when both reads and writes are at or
    below their current provisioning; otherwise the side that could drop
    is held at its provisioned level.

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :param read_units: New read unit provisioning
    :param provisioned_reads: Currently provisioned reads
    :param write_units: New write unit provisioning
    :param provisioned_writes: Currently provisioned writes
    :returns: (int, int) -- (reads, writes)
    """
    if read_units <= provisioned_reads and write_units <= provisioned_writes:
        # Both sides may go down together: allow the decrease as-is.
        return (read_units, write_units)

    if read_units < provisioned_reads:
        logger.info(
            '{0} - Reads could be decreased, but we are waiting for '
            'writes to get lower than the threshold before '
            'scaling down'.format(table_name))
        read_units = provisioned_reads
    elif write_units < provisioned_writes:
        logger.info(
            '{0} - Writes could be decreased, but we are waiting for '
            'reads to get lower than the threshold before '
            'scaling down'.format(table_name))
        write_units = provisioned_writes

    return (read_units, write_units)
Calculate values for always-decrease-rw-together This will only return reads and writes decreases if both reads and writes are lower than the current provisioning :type table_name: str :param table_name: Name of the DynamoDB table :type read_units: int :param read_units: New read unit provisioning :type provisioned_reads: int :param provisioned_reads: Currently provisioned reads :type write_units: int :param write_units: New write unit provisioning :type provisioned_writes: int :param provisioned_writes: Currently provisioned writes :returns: (int, int) -- (reads, writes)
def head(self, n=None):
    """Return the first row when *n* is None, else the first *n* rows.

    :param n: number of rows to return; ``None`` returns a single Row,
        or ``None`` when the result is empty.
    """
    if n is not None:
        return self.take(n)
    rows = self.head(1)
    return rows[0] if rows else None
Returns the first ``n`` rows. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. :param n: int, default 1. Number of rows to return. :return: If n is greater than 1, return a list of :class:`Row`. If n is 1, return a single Row. >>> df.head() Row(age=2, name=u'Alice') >>> df.head(1) [Row(age=2, name=u'Alice')]
def translate(patterns, flags):
    """Translate glob patterns into positive and negative pattern lists.

    :param patterns: a single pattern (str/bytes) or an iterable of them.
    :param flags: wildcard behavior flags; ``_TRANSLATE`` is forced on.
    :returns: tuple ``(positive, negative)`` of translated patterns.
    """
    positive = []
    negative = []
    if isinstance(patterns, (str, bytes)):
        patterns = [patterns]
    flags |= _TRANSLATE
    for pattern in patterns:
        for expanded in expand_braces(pattern, flags):
            # Negation patterns are collected in their own bucket.
            (negative if is_negative(expanded, flags) else positive).append(
                WcParse(expanded, flags & FLAG_MASK).parse()
            )
    # Under REALPATH, purely-negative inputs need an implicit
    # match-everything positive pattern to subtract from.
    if patterns and flags & REALPATH and negative and not positive:
        positive.append(_compile(b'**' if isinstance(patterns[0], bytes) else '**', flags))
    return positive, negative
Translate patterns.
def value(self, raw_value):
    """Decode a Base64-encoded parameter to a UTF-8 string.

    :param raw_value: Base64 text to decode.
    :raises ValueError: if *raw_value* is not valid Base64.
    """
    try:
        return base64.b64decode(bytes(raw_value, 'utf-8')).decode('utf-8')
    except binascii.Error as err:
        # Chain the original error so the Base64 failure context is kept
        # instead of being hidden behind "During handling of ...".
        raise ValueError(str(err)) from err
Decode param with Base64.
def referenced_by(self):
    """Return all elements that reference this element.

    A reference means this element is in use — for example in a policy
    rule, or as a member of a group.

    :return: list of referencing elements
    :rtype: list(Element)
    """
    href = fetch_entry_point('references_by_element')
    # The API call returns metadata dicts; wrap each in an Element.
    return [Element.from_meta(**ref) for ref in self.make_request(
        method='create', href=href, json={'value': self.href})]
Show all references for this element. A reference means that this element is being used, for example, in a policy rule, as a member of a group, etc. :return: list referenced elements :rtype: list(Element)
def get_intent(self,
               name,
               language_code=None,
               intent_view=None,
               retry=google.api_core.gapic_v1.method.DEFAULT,
               timeout=google.api_core.gapic_v1.method.DEFAULT,
               metadata=None):
    """Retrieve the specified intent.

    Args:
        name (str): Required. Name of the intent, in the format
            ``projects/<Project ID>/agent/intents/<Intent ID>``.
        language_code (str): Optional language for training phrases,
            parameters and rich messages; defaults to the agent's
            default language.
        intent_view: Optional resource view to apply to the returned
            intent.
        retry: Retry object used to retry requests; ``None`` disables
            retries.
        timeout (float): Time in seconds to wait per attempt.
        metadata: Additional metadata provided to the method.

    Returns:
        A ``dialogflow_v2.types.Intent`` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: request failed.
        google.api_core.exceptions.RetryError: retries exhausted.
        ValueError: invalid parameters.
    """
    # Lazily wrap the transport method with retry/timeout defaults and
    # cache the wrapper for subsequent calls.
    if 'get_intent' not in self._inner_api_calls:
        self._inner_api_calls[
            'get_intent'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.get_intent,
                default_retry=self._method_configs['GetIntent'].retry,
                default_timeout=self._method_configs['GetIntent'].timeout,
                client_info=self._client_info,
            )
    request = intent_pb2.GetIntentRequest(
        name=name,
        language_code=language_code,
        intent_view=intent_view,
    )
    return self._inner_api_calls['get_intent'](
        request, retry=retry, timeout=timeout, metadata=metadata)
Retrieves the specified intent. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.IntentsClient() >>> >>> name = client.intent_path('[PROJECT]', '[INTENT]') >>> >>> response = client.get_intent(name) Args: name (str): Required. The name of the intent. Format: ``projects/<Project ID>/agent/intents/<Intent ID>``. language_code (str): Optional. The language to retrieve training phrases, parameters and rich messages for. If not specified, the agent's default language is used. [More than a dozen languages](https://dialogflow.com/docs/reference/language) are supported. Note: languages must be enabled in the agent, before they can be used. intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dialogflow_v2.types.Intent` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def load_addon_packages(self, config_data):
    """Load addon module paths from the "config.addons" list.

    :param config_data: already parsed configuration data.
    :type config_data: dict[str,any]
    :raises ConfigurationError: when a section has an unexpected type.
    """
    section_config = config_data.get("config")
    if section_config is None:
        # A missing 'config' section is fine; nothing to load.
        return
    if not isinstance(section_config, dict):
        raise ConfigurationError(
            "'config' is %s instead of dict" % (
                type(section_config),
            ),
        )
    section_addons = section_config.get("addons", [])
    if not isinstance(section_addons, list):
        raise ConfigurationError(
            "'config.addons' is %s instead of list" % (
                type(section_addons),
            ),
        )
    # Every entry must be a text string (module path).
    for index, module_path in enumerate(section_addons):
        if not isinstance(module_path, six.text_type):
            raise ConfigurationError(
                "Item %d in 'config.addons' is %s instead of string" % (
                    index, type(module_path),
                ),
            )
    self.addon_packages = list(section_addons)
Loads the module paths from which the configuration will attempt to load sanitizers from. These must be stored as a list of strings under "config.addons" section of the configuration data. :param config_data: Already parsed configuration data, as dictionary. :type config_data: dict[str,any]
def split_at_offsets(line, offsets):
    """Split *line* at the given character offsets.

    Every piece except the final one is stripped of surrounding
    whitespace; the tail (from the last offset onward) is kept verbatim.
    Returns the list of pieces.
    """
    pieces = []
    previous = 0
    current = 0
    for current in sorted(offsets):
        # Only cut inside the line and at strictly advancing positions.
        if current < len(line) and previous != current:
            pieces.append(line[previous:current].strip())
            previous = current
    pieces.append(line[current:])
    return pieces
Split line at offsets. Return list of strings.
def prepare_time_micros(data, schema):
    """Convert a datetime.time to a microsecond count since midnight.

    Values that are not ``datetime.time`` are passed through unchanged.
    """
    if isinstance(data, datetime.time):
        # Bug fix: the original used the Python-2-only ``long`` builtin,
        # which is a NameError on Python 3. ``int`` auto-promotes to long
        # on Python 2, so behavior is unchanged there.
        return int(data.hour * MCS_PER_HOUR + data.minute * MCS_PER_MINUTE
                   + data.second * MCS_PER_SECOND + data.microsecond)
    else:
        return data
Convert datetime.time to int timestamp with microseconds
def from_file(cls, source, distance_weights=None, merge_same_words=False,
              group_marker_opening='<<',
              group_marker_closing='>>'):
    """Read a file and derive a ``Graph`` from its contents.

    Convenience wrapper around ``Graph.from_string()``; see that method
    for details on the arguments.

    Args:
        source (str): path of the file to read.
        distance_weights (dict): relative indices mapped to word weights.
        merge_same_words (bool): merge nodes having the same value.
        group_marker_opening (str): marker opening a word group.
        group_marker_closing (str): marker closing a word group.

    Returns:
        Graph
    """
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original leaked it until GC).
    with open(source, 'r') as source_file:
        source_string = source_file.read()
    return cls.from_string(source_string,
                           distance_weights,
                           merge_same_words,
                           group_marker_opening=group_marker_opening,
                           group_marker_closing=group_marker_closing)
Read a string from a file and derive a ``Graph`` from it. This is a convenience function for opening a file and passing its contents to ``Graph.from_string()`` (see that for more detail) Args: source (str): the file to read and derive the graph from distance_weights (dict): dict of relative indices corresponding with word weights. See ``Graph.from_string`` for more detail. merge_same_words (bool): whether nodes which have the same value should be merged or not. group_marker_opening (str): The string used to mark the beginning of word groups. group_marker_closing (str): The string used to mark the end of word groups. Returns: Graph Example: >>> graph = Graph.from_file('cage.txt') # doctest: +SKIP >>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP 'poetry i have nothing to say and i'
def _get_vrf_name(self, ri):
    """Build a VRF name for *ri*, region-suffixed when multi-region is on."""
    router_id = ri.router_name()[:self.DEV_NAME_LEN]
    if cfg.CONF.multi_region.enable_multi_region:
        # Qualify with the region id so names are unique across regions.
        return "%s-%s" % (router_id, cfg.CONF.multi_region.region_id)
    return router_id
overloaded method for generating a vrf_name that supports region_id
def run(self):
    """Run pydocstyle analysis on a single file.

    Yields ``(lineno, offset, text, Main)`` tuples for every reported
    error that has a code and is not ignored.
    """
    pydocstyle.Error.explain = self.options['explain']
    filename, source = load_file(self.filename)
    for error in pydocstyle.PEP257Checker().check_source(source, filename):
        # Skip entries without a code and explicitly ignored codes.
        if not hasattr(error, 'code') or ignore(error.code):
            continue
        lineno = error.line
        offset = 0
        explanation = error.explanation if pydocstyle.Error.explain else ''
        # Strip the "Dnnn: " prefix from the message; the code is
        # prepended separately.
        text = '{0} {1}{2}'.format(error.code, error.message.split(': ', 1)[1], explanation)
        yield lineno, offset, text, Main
Run analysis on a single file.
def get_context_manager(self, default):
    """Context manager pushing *default* onto the default stack for the
    duration of the ``with`` block.

    When ``enforce_nesting`` is set, exits must occur in strict LIFO
    order or an ``AssertionError`` is raised; otherwise the entry is
    removed from wherever it sits in the stack.
    """
    try:
        self.stack.append(default)
        yield default
    finally:
        if self.enforce_nesting:
            # Strict nesting: the object leaving must be on top.
            if self.stack[-1] is not default:
                raise AssertionError(
                    "Nesting violated for default stack of %s objects" % type(default))
            self.stack.pop()
        else:
            self.stack.remove(default)
A context manager for manipulating a default stack.
def list_sqs(region, filter_by_kwargs):
    """List all SQS queues in *region*, filtered by *filter_by_kwargs*."""
    connection = boto.sqs.connect_to_region(region)
    return lookup(connection.get_all_queues(), filter_by=filter_by_kwargs)
List all SQS Queues.
def insert(self, index: int, item: object) -> None:
    """Insert *item* at *index* in the underlying blueprint list.

    Required by the ``MutableSequence`` ABC, which implements
    ``append`` in terms of this method.

    :param index: position at which to insert the new Blueprint.
    :param item: new ``Blueprint`` object.
    :return: None
    """
    self._blueprints.insert(index, item)
The Abstract class `MutableSequence` leverages this insert method to perform the `BlueprintGroup.append` operation. :param index: Index to use for removing a new Blueprint item :param item: New `Blueprint` object. :return: None
def loaded(self, request, *args, **kwargs):
    """Return a serialized list of all loaded Packs."""
    packs = list(Pack.objects.all())
    serializer = self.get_serializer(packs, many=True)
    return Response(serializer.data)
Return a list of loaded Packs.
def add_bucket(self, bucket, bucket_type=None):
    """Add all keys of a bucket to the MapReduce inputs.

    :param bucket: the bucket, as a name or a ``RiakBucket``
    :type bucket: string or RiakBucket
    :param bucket_type: optional name of a bucket type
    :type bucket_type: string, None
    :rtype: :class:`RiakMapReduce`
    """
    if not riak.disable_list_exceptions:
        raise riak.ListError()
    self._input_mode = 'bucket'
    if isinstance(bucket, riak.RiakBucket):
        # A bucket object carries its own type information.
        btype = bucket.bucket_type
        if btype.is_default():
            inputs = {'bucket': bucket.name}
        else:
            inputs = {'bucket': [btype.name, bucket.name]}
    elif bucket_type is not None and bucket_type != "default":
        inputs = {'bucket': [bucket_type, bucket]}
    else:
        inputs = {'bucket': bucket}
    self._inputs = inputs
    return self
Adds all keys in a bucket to the inputs. :param bucket: the bucket :type bucket: string :param bucket_type: Optional name of a bucket type :type bucket_type: string, None :rtype: :class:`RiakMapReduce`
def get_summary(self, dataset_number=None, use_dfsummary_made=False):
    """Retrieve the summary, returned as a pandas DataFrame.

    Returns ``None`` for an empty dataset, or — when
    *use_dfsummary_made* is set — when the summary is not made yet.
    """
    dataset_number = self._validate_dataset_number(dataset_number)
    if dataset_number is None:
        self._report_empty_dataset()
        return None
    test = self.get_dataset(dataset_number)
    # Only consult the dfsummary_made flag when explicitly requested.
    summary_ready = test.dfsummary_made if use_dfsummary_made else True
    if not summary_ready:
        warnings.warn("Summary is not made yet")
        return None
    self.logger.info("returning datasets[test_no].dfsummary")
    return test.dfsummary
Retrieve summary returned as a pandas DataFrame.
def load_config(self, config):
    """Configure this grab instance from an external config object."""
    self.config = copy_config(config, self.mutable_config_keys)
    state = config['state']
    # Restore any cookies persisted in the config state.
    if 'cookiejar_cookies' in state:
        self.cookies = CookieManager.from_cookie_list(
            state['cookiejar_cookies'])
Configure grab instance with external config object.
def addDtdEntity(self, name, type, ExternalID, SystemID, content):
    """Register a new entity for this document's DTD external subset.

    :raises treeError: if libxml2 fails to create the entity.
    :return: the new entity wrapped in an ``xmlEntity``.
    """
    ret = libxml2mod.xmlAddDtdEntity(self._o, name, type, ExternalID, SystemID, content)
    if ret is None:raise treeError('xmlAddDtdEntity() failed')
    # Wrap the raw C-level node in a Python xmlEntity object.
    __tmp = xmlEntity(_obj=ret)
    return __tmp
Register a new entity for this document DTD external subset.
def loadmat(path):
    r"""Load a matlab data file bundled with the pygsp package.

    Parameters
    ----------
    path : string
        Path to the mat file from the data folder, without the .mat
        extension.

    Returns
    -------
    data : dict
        Dictionary with variable names as keys, and loaded matrices as
        values.

    Examples
    --------
    >>> from pygsp import utils
    >>> data = utils.loadmat('pointclouds/bunny')
    >>> data['bunny'].shape
    (2503, 3)
    """
    # Bug fix: the original body contained a stray ``r`` (remnant of a
    # stripped raw docstring), which raised NameError at call time.
    # Read via pkgutil so this also works from a zipped package.
    data = pkgutil.get_data('pygsp', 'data/' + path + '.mat')
    data = io.BytesIO(data)
    return scipy.io.loadmat(data)
r""" Load a matlab data file. Parameters ---------- path : string Path to the mat file from the data folder, without the .mat extension. Returns ------- data : dict dictionary with variable names as keys, and loaded matrices as values. Examples -------- >>> from pygsp import utils >>> data = utils.loadmat('pointclouds/bunny') >>> data['bunny'].shape (2503, 3)
def resolve_reference(target_reference, project):
    """Resolve *target_reference* in the context of *project*.

    Returns a tuple of the referred-to AbstractTarget instance and the
    property_set of properties explicitly specified in the reference.
    """
    assert isinstance(target_reference, basestring)
    assert isinstance(project, ProjectTarget)
    # Split "id/prop1/prop2/..." into the target id and its properties.
    split = _re_separate_target_from_properties.match (target_reference)
    if not split:
        raise BaseException ("Invalid reference: '%s'" % target_reference)
    id = split.group (1)
    sproperties = []
    if split.group (3):
        sproperties = property.create_from_strings(feature.split(split.group(3)))
        sproperties = feature.expand_composites(sproperties)
    target = project.find (id)
    return (target, property_set.create(sproperties))
Given a target_reference, made in context of 'project', returns the AbstractTarget instance that is referred to, as well as properties explicitly specified for this reference.
def handle_error(self, request, client_address):
    """Log serving exceptions, demoting noisy EPIPE errors to warnings."""
    del request  # unused; parameter required by the BaseServer interface
    exc_info = sys.exc_info()
    e = exc_info[1]
    if isinstance(e, IOError) and e.errno == errno.EPIPE:
        # Client hung up mid-response: common and harmless.
        # Fix: logger.warn is a deprecated alias of logger.warning.
        logger.warning('EPIPE caused by %s in HTTP serving' % str(client_address))
    else:
        logger.error('HTTP serving error', exc_info=exc_info)
Override to get rid of noisy EPIPE errors.
def make_association_id(definedby, sub, pred, obj, attributes=None):
    """Create a unique identifier for an OBAN-style association.

    The id is a MONARCH-prefixed digest of the concatenated non-None
    parts, plus any extra attributes supplied; it is effectively an RDF
    blank node.

    :param definedby: the (data) resource that provided the annotation
    :param sub: subject
    :param pred: predicate
    :param obj: object
    :param attributes: optional extra values appended to the id
    :return: the association id string
    """
    parts = [definedby, sub, pred, obj]
    if attributes:
        parts.extend(attributes)
    parts = [part for part in parts if part is not None]
    digest = GraphUtils.digest_id('+'.join(parts))
    assoc_id = ':'.join(('MONARCH', digest))
    assert assoc_id is not None
    return assoc_id
A method to create unique identifiers for OBAN-style associations, based on all the parts of the association. If any of the items is empty or None, it will convert it to blank. It effectively digests the string of concatenated values. Subclasses of Assoc can submit an additional array of attributes that will be appended to the ID. Note this is equivalent to an RDF blank node :param definedby: The (data) resource that provided the annotation :param subject: :param predicate: :param object: :param attributes: :return:
def copy_selected_sources(cls, roi, source_names):
    """Build a new ROIModel by copying the named sources from *roi*.

    Sources that fail to copy are skipped (best-effort behavior).
    """
    roi_new = cls.make_roi()
    for name in source_names:
        try:
            source_copy = roi.copy_source(name)
        except Exception:
            # Deliberate best-effort: skip sources that cannot be copied.
            continue
        roi_new.load_source(source_copy, build_index=False)
    return roi_new
Build and return a `fermipy.roi_model.ROIModel` object by copying selected sources from another such object
def www_authenticate(self):
    """The ``WWW-Authenticate`` header in a parsed form.

    Mutations of the returned object are written back to the headers via
    the ``on_update`` callback.
    """
    def on_update(www_auth):
        # Keep the raw header in sync with the parsed object: drop it
        # when the object becomes empty, re-serialize it otherwise.
        if not www_auth and 'www-authenticate' in self.headers:
            del self.headers['www-authenticate']
        elif www_auth:
            self.headers['WWW-Authenticate'] = www_auth.to_header()
    header = self.headers.get('www-authenticate')
    return parse_www_authenticate_header(header, on_update)
The `WWW-Authenticate` header in a parsed form.
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
    """Compute ``torch.equal``, optionally ignoring one index value.

    Args:
        ignore_index (int, optional): value in ``tensor`` whose
            positions are excluded from the comparison in both tensors.

    Returns:
        (bool) ``True`` if the (masked) tensors are equal.
    """
    if ignore_index is None:
        return torch.equal(tensor, tensor_other)
    assert tensor.size() == tensor_other.size()
    # Mask both tensors by the positions of ignore_index in `tensor`.
    keep = tensor.ne(ignore_index)
    return torch.equal(tensor.masked_select(keep),
                       tensor_other.masked_select(keep))
Compute ``torch.equal`` with the optional mask parameter. Args: ignore_index (int, optional): Specifies a ``tensor`` index that is ignored. Returns: (bool) Returns ``True`` if target and prediction are equal.
def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):
    """Create a PDF from `rst_content` using `style_text` as style.

    Args:
        rst_content (str): document body in restructured text markup.
        style_text (str): style for the :mod:`rst2pdf` module.
        header (str, default None): header rendered on each page.
        footer (str, default FOOTER): footer rendered on each page.

    Returns:
        obj: StringIO instance containing the PDF, rewound to offset 0.
    """
    out_file_obj = StringIO()
    # rst2pdf needs the stylesheet on disk; the temp file only has to
    # live for the duration of the rendering below.
    with NamedTemporaryFile() as f:
        f.write(style_text)
        f.flush()
        pdf = _init_pdf(f.name, header, footer)
        pdf.createPdf(text=rst_content, output=out_file_obj, compressed=True)
    # Rewind so callers can read the PDF from the start.
    out_file_obj.seek(0)
    return out_file_obj
Create PDF file from `rst_content` using `style_text` as style. Optinally, add `header` or `footer`. Args: rst_content (str): Content of the PDF file in restructured text markup. style_text (str): Style for the :mod:`rst2pdf` module. header (str, default None): Header which will be rendered to each page. footer (str, default FOOTER): Footer, which will be rendered to each page. See :attr:`FOOTER` for details. Returns: obj: StringIO file instance containing PDF file.
def raw_role_mentions(self):
    """Return the role IDs mentioned as ``<@&role_id>`` in the message
    content, as a list of ints.
    """
    return [int(role_id)
            for role_id in re.findall(r'<@&([0-9]+)>', self.content)]
A property that returns an array of role IDs matched with the syntax of <@&role_id> in the message content.
def start_request(self, headers, *, end_stream=False):
    """Start a request on a new stream and return the new stream ID.

    May block until the transport is writable and a new outbound stream
    may be created (peer MAX_CONCURRENT_STREAMS). Completion only means
    the frames were buffered, not that they were delivered.

    :param headers: list of (name, value) header tuples.
    :param end_stream: set True to send a request without a body.
    :return: stream ID as an integer.
    """
    yield from _wait_for_events(self._resumed, self._stream_creatable)
    stream_id = self._conn.get_next_available_stream_id()
    # Register the new stream with the priority tree before sending.
    self._priority.insert_stream(stream_id)
    self._priority.block(stream_id)
    self._conn.send_headers(stream_id, headers, end_stream=end_stream)
    self._flush()
    return stream_id
Start a request by sending given headers on a new stream, and return the ID of the new stream. This may block until the underlying transport becomes writable, and the number of concurrent outbound requests (open outbound streams) is less than the value of peer config MAX_CONCURRENT_STREAMS. The completion of the call to this method does not mean the request is successfully delivered - data is only correctly stored in a buffer to be sent. There's no guarantee it is truly delivered. :param headers: A list of key-value tuples as headers. :param end_stream: To send a request without body, set `end_stream` to `True` (default `False`). :return: Stream ID as a integer, used for further communication.
def send_os_command(self, os_command_text, is_priority=False):
    """Send a command to the operating system running in this partition.

    :param os_command_text: text of the operating system command.
    :param is_priority: treat the message as a priority OS command.
    """
    body = {
        'is-priority': is_priority,
        'operating-system-command-text': os_command_text,
    }
    uri = self.uri + '/operations/send-os-cmd'
    self.manager.session.post(uri, body)
Send a command to the operating system running in this partition. Parameters: os_command_text (string): The text of the operating system command. is_priority (bool): Boolean controlling whether this is a priority operating system command, as follows: * If `True`, this message is treated as a priority operating system command. * If `False`, this message is not treated as a priority operating system command. The default. Returns: None Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError`
def end_day_to_datetime(end_day, config):
    """Convert an end day to the datetime that actually ends that workday.

    Because ``day_start`` may not be midnight, a workday can spill into
    the following calendar day; in that case the end moment is pushed
    one day forward.
    """
    day_end_time = get_day_end(config)
    end = datetime.datetime.combine(end_day, day_end_time)
    if config['day_start'] != datetime.time(0, 0, 0):
        # The workday reaches into the next calendar day.
        end += datetime.timedelta(days=1)
    return end
Convert a given end day to its proper datetime. This is non trivial because of variable ``day_start``. We want to make sure that even if an 'end day' is specified the actual point in time may reach into the following day. Args: end_day (datetime.date): Raw end date that is to be adjusted. config: Controller config containing information on when a workday starts. Returns: datetime.datetime: The end day as an adjusted datetime object. Example: Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to consider even points in time up to ``2015-04-02 5:29``. That is to represent that a *work day* does not match *calendar days*. Note: An alternative implementation for the similar problem in legacy hamster: ``hamster.storage.db.Storage.__get_todays_facts``.