code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _read_all_from_socket(self, timeout):
    """Read every packet currently available on the socket(s).

    The first recv blocks for up to ``timeout`` seconds; afterwards the
    socket is switched to non-blocking mode and drained.  Returns a list
    of ``(bytearray(packet), receive_time)`` tuples — empty when nothing
    arrived within the timeout.  The receive time is when recv() returned,
    not when the packet reached the host.
    """
    packets = []

    def drain(sock, bufsize):
        # First recv honours the timeout; after the first packet the
        # socket becomes non-blocking so we only collect what is queued.
        try:
            sock.settimeout(timeout)
            while True:
                data = sock.recv(bufsize)
                packets.append((bytearray(data), time.time()))
                sock.settimeout(0)
        except socket.timeout:
            pass
        except socket.error as err:
            if err.errno != errno.EWOULDBLOCK:
                raise

    drain(self._sock, 64)
    if self._ipv6_address_present:
        drain(self._sock6, 128)
    return packets
Read all packets we currently can on the socket. Returns list of tuples. Each tuple contains a packet and the time at which it was received. NOTE: The receive time is the time when our recv() call returned, which greatly depends on when it was called. The time is NOT the time at which the packet arrived at our host, but it's the closest we can come to the real ping time. If nothing was received within the timeout time, the return list is empty. First read is blocking with timeout, so we'll wait at least that long. Then, in case any more packets have arrived, we read everything we can from the socket in non-blocking mode.
def insert_colorpoint(self, position=0.5, color1=None, color2=None):
    """Insert a color stop at *position* into the colorpoint list.

    :param position: stop position, clamped into [0.0, 1.0]
    :param color1: RGB triple (defaults to [1.0, 1.0, 0.0])
    :param color2: RGB triple (defaults to [1.0, 1.0, 0.0])
    :return: self, so calls can be chained
    """
    # BUG FIX: the defaults were mutable list literals, so every call
    # using the default shared (and stored) the very same list object.
    if color1 is None:
        color1 = [1.0, 1.0, 0.0]
    if color2 is None:
        color2 = [1.0, 1.0, 0.0]
    points = self._colorpoint_list
    if position <= 0.0:
        points.insert(0, [0.0, color1, color2])
    elif position >= 1.0:
        points.append([1.0, color1, color2])
    else:
        # Find the first existing stop at or beyond position and insert
        # just before it (assumes the list already spans 0.0 .. 1.0).
        for i in range(len(points)):
            if position <= points[i + 1][0]:
                points.insert(i + 1, [position, color1, color2])
                break
    self.update_image()
    return self
Inserts the specified color into the list.
def _oxford_comma_separator(i, length): if length == 1: return None elif length < 3 and i == 0: return ' and ' elif i < length - 2: return ', ' elif i == length - 2: return ', and ' else: return None
Make a separator for a prose-like list with `,` between items except for `, and` after the second to last item.
def get_handler_name(route: Route, logic: Callable) -> str:
    """Derive a handler class name for a route.

    An explicit route.handler_name wins.  POST routes get a 'ListHandler'
    suffix, others a plain 'Handler' suffix; the base name comes from the
    route heading unless it is the generic 'API', in which case the logic
    function's name is used.
    """
    if route.handler_name is not None:
        return route.handler_name
    is_post = any(m for m in route.methods if m.method.lower() == 'post')
    suffix = 'ListHandler' if is_post else 'Handler'
    base = route.heading if route.heading != 'API' else logic.__name__
    return '{}{}'.format(get_valid_class_name(base), suffix)
Gets the handler name. :param route: A Route instance. :param logic: The logic function. :returns: A handler class name.
def key_func(*keys, **kwargs):
    """Create a "key function" that chains lookups for the given keys.

    ``key_func('a', 'b')(foo)`` is equivalent to ``foo['a']['b']``.

    :param keys: lookup keys, applied in order
    :param default: optional keyword-only default returned when any key
                    is missing
    :return: unary key function
    """
    ensure_argcount(keys, min_=1)
    ensure_keyword_args(kwargs, optional=('default',))
    keys = [ensure_string(k) for k in keys]

    if 'default' in kwargs:
        default = kwargs['default']

        def getitems(obj):
            # Walk the key chain, bailing out with the default on a miss.
            for key in keys:
                try:
                    obj = obj[key]
                except KeyError:
                    return default
            return obj

        return getitems

    if len(keys) == 1:
        return operator.itemgetter(keys[0])

    def getitems(obj):
        for key in keys:
            obj = obj[key]
        return obj

    return getitems
Creates a "key function" based on given keys. Resulting function will perform lookup using specified keys, in order, on the object passed to it as an argument. For example, ``key_func('a', 'b')(foo)`` is equivalent to ``foo['a']['b']``. :param keys: Lookup keys :param default: Optional keyword argument specifying default value that will be returned when some lookup key is not present :return: Unary key function
def notification_preference(obj_type, profile):
    """Resolve the on/off notification preference for *obj_type*.

    With no profile (or an empty one for the notifications lookup) alerts
    default to on.
    """
    if profile:
        alerts_on = profile.get('notifications', {}).get(obj_type, True)
    else:
        alerts_on = True
    return dict(alerts_on=alerts_on, obj_type=obj_type)
Display two radio buttons for turning notifications on or off. The default value is alerts_on = True.
def interpolate(self, lon, lat, egy=None, interp_log=True):
    """Interpolate map values at the given sky coordinates.

    1-D (single HEALPix map) data is interpolated directly; otherwise the
    request is delegated to the cube interpolator, optionally working in
    log-space for the energy axis.
    """
    if self.data.ndim != 1:
        return self._interpolate_cube(lon, lat, egy, interp_log)
    # Convert lon/lat degrees to HEALPix spherical angles.
    theta = np.pi / 2. - np.radians(lat)
    phi = np.radians(lon)
    return hp.pixelfunc.get_interp_val(self.counts, theta, phi,
                                       nest=self.hpx.nest)
Interpolate map values. Parameters ---------- interp_log : bool Interpolate the z-coordinate in logspace.
def getSampleFrequencies(self):
    """Return an array with the (rounded) sample frequency of each signal."""
    frequencies = [round(self.samplefrequency(channel))
                   for channel in np.arange(self.signals_in_file)]
    return np.array(frequencies)
Returns samplefrequencies of all signals. Parameters ---------- None Examples -------- >>> import pyedflib >>> f = pyedflib.data.test_generator() >>> all(f.getSampleFrequencies()==200.0) True >>> f._close() >>> del f
def _tokenize_latex(self, exp): tokens = [] prevexp = "" while exp: t, exp = self._get_next_token(exp) if t.strip() != "": tokens.append(t) if prevexp == exp: break prevexp = exp return tokens
Internal method to tokenize latex
def add_densities(density1, density2):
    """Sum two {spin: density} mappings elementwise.

    Args:
        density1: first density dict.
        density2: second density dict (must share density1's spin keys).

    Returns:
        Dict of {spin: density} with numpy-array values.
    """
    summed = {}
    for spin in density1:
        summed[spin] = np.array(density1[spin]) + np.array(density2[spin])
    return summed
Method to sum two densities. Args: density1: First density. density2: Second density. Returns: Dict of {spin: density}.
def register(self, target):
    """Attach every stored url rule of this blueprint to *target*.

    Each rule dispatches through this blueprint's dispatch_request under
    the blueprint's name.
    """
    for rule, extra_options in self.url_rules:
        target.add_url_rule(rule, self.name, self.dispatch_request,
                            **extra_options)
Registers url_rules on the blueprint
def addCategory(self, categoryUri, weight):
    """Append a relevant category to the topic page.

    @param categoryUri: uri of the category to add
    @param weight: numeric importance of the category
    """
    assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
    entry = {"uri": categoryUri, "wgt": weight}
    self.topicPage["categories"].append(entry)
add a relevant category to the topic page @param categoryUri: uri of the category to be added @param weight: importance of the provided category (typically in range 1 - 50)
def format_vk(vk):
    """Normalise vk in place: every extension's 'require' becomes a list."""
    for extension in get_extensions_filtered(vk):
        requirement = extension['require']
        if not isinstance(requirement, list):
            extension['require'] = [requirement]
Format vk before using it
def add(self, path):
    """Feed the contents of the file at *path* into the running digest.

    :param path: path of the file to hash
    :return: None
    """
    with salt.utils.files.fopen(path, 'rb') as handle:
        # Read in fixed-size chunks until EOF (read() returns b'').
        for block in iter(lambda: handle.read(self.__buff), b''):
            self.__digest.update(block)
Update digest with the file content by path. :param path: :return:
def smart_object(self):
    """Associated smart object, created lazily and cached on the instance."""
    try:
        return self._smart_object
    except AttributeError:
        self._smart_object = SmartObject(self)
        return self._smart_object
Associated smart object. :return: :py:class:`~psd_tools.api.smart_object.SmartObject`.
def file2abspath(filename, this_file=__file__):
    """Return the absolute path of *filename* relative to this_file's directory."""
    base_dir = os.path.dirname(os.path.abspath(this_file))
    return os.path.abspath(os.path.join(base_dir, filename))
generate absolute path for the given file and base dir
def send_datagram(self, message):
    """Serialize *message* and send it to its destination over UDP.

    :type message: Message
    :param message: the message to send
    """
    if self.stopped.isSet():
        return
    host, port = message.destination
    logger.debug("send_datagram - " + str(message))
    payload = Serializer().serialize(message)
    self._socket.sendto(payload, (host, port))
Send a message through the udp socket. :type message: Message :param message: the message to send
def assert_is_not(expected, actual, message=None, extra=None):
    """Raise an AssertionError when *expected* and *actual* are the same object."""
    assert expected is not actual, \
        _assert_fail_message(message, expected, actual, "is", extra)
Raises an AssertionError if expected is actual.
def get_instance(self, payload):
    """Build a TodayInstance from an API payload dict.

    :param dict payload: payload response from the API
    :returns: twilio.rest.api.v2010.account.usage.record.today.TodayInstance
    """
    return TodayInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
    )
Build an instance of TodayInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.today.TodayInstance :rtype: twilio.rest.api.v2010.account.usage.record.today.TodayInstance
def shift_and_pad(tensor, shift, axis=0):
    """Shift *tensor* by *shift* positions along *axis*, padding with zeros.

    Example: shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
             shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]

    Args:
        tensor: Tensor to be shifted and padded.
        shift: int, number of positions to shift by (sign gives direction).
        axis: int, axis along which to shift and pad.

    Returns:
        A Tensor with the same shape as the input tensor.
    """
    shape = tensor.shape
    rank = len(shape)
    assert 0 <= abs(axis) < rank
    length = int(shape[axis])
    assert 0 <= abs(shift) < length

    paddings = [(0, 0)] * rank
    begin = [0] * rank
    size = [-1] * rank
    if shift > 0:
        # Drop the last `shift` elements and pad zeros at the front.
        paddings[axis] = (shift, 0)
        size[axis] = length - shift
    elif shift < 0:
        # Drop the first `-shift` elements and pad zeros at the back.
        paddings[axis] = (0, -shift)
        begin[axis] = -shift
    return tf.pad(tf.slice(tensor, begin, size), paddings)
Shifts and pads with zero along an axis. Example: shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2] shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0] Args: tensor: Tensor; to be shifted and padded. shift: int; number of positions to shift by. axis: int; along which axis to shift and pad. Returns: A Tensor with the same shape as the input tensor.
def _nix_env():
    """Return the user's nix-env binary as a one-element argv list."""
    home = os.path.expanduser('~{0}'.format(__opts__['user']))
    bindir = os.path.join(home, '.nix-profile/bin/')
    return [os.path.join(bindir, 'nix-env')]
nix-env with quiet option. By default, nix is extremely verbose and prints the build log of every package to stderr. This tells nix to only show changes.
def get_chromecasts(tries=None, retry_wait=None, timeout=None,
                    blocking=True, callback=None):
    """Search the network for chromecast devices.

    Blocking mode returns the list of devices found; non-blocking mode
    invokes *callback* per discovered device and returns a function that
    stops discovery.  Devices that fail to connect are skipped.
    """
    if blocking:
        found = []
        for host in discover_chromecasts():
            try:
                found.append(_get_chromecast_from_host(
                    host, tries=tries, retry_wait=retry_wait,
                    timeout=timeout, blocking=blocking))
            except ChromecastConnectionError:
                pass  # unreachable device: skip it
        return found

    if not callable(callback):
        raise ValueError(
            "Nonblocking discovery requires a callback function.")

    def internal_callback(name):
        # Resolve the mDNS service name and hand a connected Chromecast
        # object to the user-supplied callback.
        try:
            callback(_get_chromecast_from_host(
                listener.services[name], tries=tries,
                retry_wait=retry_wait, timeout=timeout,
                blocking=blocking))
        except ChromecastConnectionError:
            pass

    def internal_stop():
        stop_discovery(browser)

    listener, browser = start_discovery(internal_callback)
    return internal_stop
Searches the network for chromecast devices. If blocking = True, returns a list of discovered chromecast devices. If blocking = False, triggers a callback for each discovered chromecast, and returns a function which can be executed to stop discovery. May return an empty list if no chromecasts were found. Tries is specified if you want to limit the number of times the underlying socket associated with your Chromecast objects will retry connecting if connection is lost or it fails to connect in the first place. The number of seconds spent between each retry can be defined by passing the retry_wait parameter, the default is to wait 5 seconds.
def _run_bunny(args):
    """Run CWL with rabix bunny."""
    main_file, json_file, project_name = _get_main_and_json(args.directory)
    work_dir = utils.safe_makedir(os.path.join(os.getcwd(), "bunny_work"))
    flags = ["-b", work_dir]
    log_file = os.path.join(work_dir, "%s-bunny.log" % project_name)
    if os.path.exists(work_dir):
        # Resume from the most recently modified cache directory, if any.
        caches = [os.path.join(work_dir, entry)
                  for entry in os.listdir(work_dir)
                  if os.path.isdir(os.path.join(work_dir, entry))]
        if caches:
            flags += ["--cache-dir", max(caches, key=os.path.getmtime)]
    if args.no_container:
        _remove_bcbiovm_path()
        flags += ["--no-container"]
    cmd = ["rabix"] + flags + [main_file, json_file]
    with utils.chdir(work_dir):
        _run_tool(cmd, not args.no_container, work_dir, log_file)
Run CWL with rabix bunny.
def user_has_permission(self, user, name):
    """Return True when *user* belongs to any group granting permission *name*."""
    record = AuthMembership.objects(creator=self.client, user=user).first()
    if not record:
        return False
    return any(self.has_permission(group.role, name)
               for group in record.groups)
verify user has permission
def pairwise(
    iterable: Iterable,
    default_value: Any,
) -> Iterable[Tuple[Any, Any]]:
    """Yield consecutive pairs from *iterable*, padding the end.

    pairwise([1, 2, 3], default_value=None) -> (1, 2), (2, 3), (3, None)
    """
    leading, trailing = tee(iterable)
    next(trailing, default_value)  # advance the second copy by one item
    return zip_longest(leading, trailing, fillvalue=default_value)
Return pairs of items from `iterable`. pairwise([1, 2, 3], default_value=None) -> (1, 2), (2, 3), (3, None)
def load_bookmark(self, slot_num):
    """Open the file bookmarked in *slot_num* and move the cursor there.

    Does nothing when the slot is unused; clears the last-edit position
    when the bookmarked file no longer exists.
    """
    bookmarks = CONF.get('editor', 'bookmarks')
    if slot_num not in bookmarks:
        return
    filename, line_num, column = bookmarks[slot_num]
    if not osp.isfile(filename):
        self.last_edit_cursor_pos = None
        return
    self.load(filename)
    editor = self.get_current_editor()
    if line_num < editor.document().lineCount():
        block_text = editor.document().findBlockByNumber(line_num).text()
        # Clamp the column to the bookmarked line's actual length.
        target_column = column if column <= len(block_text) else len(block_text)
        editor.go_to_line(line_num + 1, target_column)
Set cursor to bookmarked file and position.
def encode(self, value):
    """Encode *value* using the first encoder that accepts it.

    Falls back to the schema default when value is None.  Raises
    ValueError, carrying the last encoder's complaint, when no encoder
    accepts the value.
    """
    if value is None and self._default is not None:
        value = self._default
    last_error = None
    for encoder in self._encoders:
        try:
            return encoder(value)
        except ValueError as ex:
            # Remember why this encoder rejected the value for the final
            # error message.  (The previous code referenced `ex.message`
            # after the loop, which does not exist on Python 3 and where
            # `ex` is no longer in scope.)
            last_error = ex
    raise ValueError('Value \'{}\' is invalid. {}'.format(value, last_error))
The encoder for this schema. Tries each encoder in order of the types specified for this schema.
def work(self, interval=5):
    """Main worker loop: reserve jobs from the queues and fork to run them.

    ``interval`` is how many seconds ``reserve`` waits for the next job;
    an interval of 0 makes the loop exit as soon as no job is available.
    The loop also exits when a shutdown has been scheduled.
    """
    self._setproctitle("Starting")
    logger.info("starting")
    self.startup()
    while True:
        if self._shutdown:
            logger.info('shutdown scheduled')
            break
        self.register_worker()
        job = self.reserve(interval)
        if job:
            self.fork_worker(job)
        elif interval == 0:
            break
        else:
            self._setproctitle("Waiting")
    self.unregister_worker()
Invoked by ``run`` method. ``work`` listens on a list of queues and sleeps for ``interval`` time. ``interval`` -- Number of seconds the worker will wait until processing the next job. Default is "5". Whenever a worker finds a job on the queue it first calls ``reserve`` on that job to make sure another worker won't run it, then *forks* itself to work on that job.
def getSignificance(wk1, wk2, nout, ofac):
    """Return the false-alarm probability of each periodogram peak.

    The lower the probability, the more significant the peak.  Uses the
    small-probability approximation ``M * exp(-z)`` and switches to the
    exact expression where that approximation exceeds 0.01.
    """
    expy = exp(-wk2)
    effm = 2.0 * nout / ofac
    sig = effm * expy
    exact = (sig > 0.01).nonzero()
    sig[exact] = 1.0 - (1.0 - expy[exact]) ** effm
    return sig
returns the peak false alarm probabilities Hence the lower is the probability and the more significant is the peak
def xray(im, direction='X'):
    """Simulate an X-ray radiograph through the material along *direction*.

    The solid phase (True/1 voxels) attenuates the ray, so columns with
    more solid sum to larger values.  Returns a 2-D greyscale image.
    """
    attenuation = sp.array(~im, dtype=int)
    if direction in ['Y', 'y']:
        attenuation = sp.transpose(attenuation, axes=[1, 0, 2])
    if direction in ['Z', 'z']:
        attenuation = sp.transpose(attenuation, axes=[2, 1, 0])
    return sp.sum(attenuation, axis=0)
r""" Simulates an X-ray radiograph looking through the porous material in the specified direction. The resulting image is colored according to the amount of attenuation an X-ray would experience, so regions with more solid will appear darker. Parameters ---------- im : array_like ND-image of the porous material with the solid phase marked as 1 or True direction : string Specify the axis along which the camera will point. Options are 'X', 'Y', and 'Z'. Returns ------- image : 2D-array A 2D greyscale image suitable for use in matplotlib\'s ```imshow``` function.
def get(self):
    """Receive one length-prefixed message.

    Returns the message string, or None when the peer closed the
    connection mid-read.
    """
    def read_exact(count):
        # Accumulate exactly *count* characters; '' from recv means EOF.
        data = ''
        while len(data) < count:
            chunk = self._sock.recv(count - len(data))
            if self._encode:
                chunk = chunk.decode()
            if chunk == '':
                return None
            data += chunk
        return data

    header = read_exact(self.HEADER_LENGTH)
    if header is None:
        return None
    return read_exact(int(header))
Receive a message. Return the message upon successful reception, or None upon failure.
def yticksize(self, size, index=1):
    """Set the y-axis tick font size.

    Parameters
    ----------
    size : int
    index : int, which y axis to modify (default 1)

    Returns
    -------
    Chart (self, for chaining)
    """
    axis_key = 'yaxis' + str(index)
    self.layout[axis_key]['tickfont']['size'] = size
    return self
Set the tick font size. Parameters ---------- size : int Returns ------- Chart
def delete_webhook(webhook_id):
    """Delete a webhook, enforcing that the caller owns its project.

    Returns 403 when the current user does not own the project.
    """
    webhook = get_data_or_404('webhook', webhook_id)
    action = get_data_or_404('action', webhook['action_id'])
    project = get_data_or_404('project', action['project_id'])
    if project['owner_id'] != get_current_user_id():
        return jsonify(message='forbidden'), 403
    # BUG FIX: previously this passed action['id'], deleting (or trying
    # to delete) the wrong record instead of the webhook itself.
    delete_instance('webhook', webhook['id'])
    return jsonify({})
Delete webhook.
def upsert(self, _id, dct, attribute="_id"):
    """Update the matching document or insert it when absent.

    :param str _id: the document id
    :param dict dct: the fields to set on the document
    :param str attribute: the attribute queried to locate the document
    :returns: JSON Mongo client response including the "n" key with the
        number of objects affected
    """
    response = yield self.update(_id, dct, upsert=True, attribute=attribute)
    raise Return(response)
Update or Insert a new document :param str _id: The document id :param dict dct: The dictionary to set on the document :param str attribute: The attribute to query for to find the object to set this data on :returns: JSON Mongo client response including the "n" key to show number of objects effected
def uniform_cost(problem, graph_search=False, viewer=None):
    """Uniform cost search.

    With graph_search=True repeated states are not re-explored.  Requires
    SearchProblem.actions, .result, .is_goal and .cost.
    """
    return _search(
        problem,
        BoundedPriorityQueue(),
        graph_search=graph_search,
        node_factory=SearchNodeCostOrdered,
        graph_replace_when_better=True,
        viewer=viewer,
    )
Uniform cost search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, and SearchProblem.cost.
def f1_score(y_true, y_pred, average='micro', suffix=False):
    """Entity-level F1 score: harmonic mean of precision and recall.

    Args:
        y_true: 2d array of ground-truth tag sequences.
        y_pred: 2d array of predicted tag sequences.

    Returns:
        float in [0, 1]; 0 when either precision or recall is undefined.
    """
    true_entities = set(get_entities(y_true, suffix))
    pred_entities = set(get_entities(y_pred, suffix))
    n_correct = len(true_entities & pred_entities)
    n_pred = len(pred_entities)
    n_true = len(true_entities)
    precision = n_correct / n_pred if n_pred > 0 else 0
    recall = n_correct / n_true if n_true > 0 else 0
    if precision + recall == 0:
        return 0
    return 2 * precision * recall / (precision + recall)
Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: >>> from seqeval.metrics import f1_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> f1_score(y_true, y_pred) 0.50
def _initialize_table(self, column_count):
    """Initialise per-column metadata for a table with *column_count* columns.

    Parameters
    ----------
    column_count : int
        number of columns in the table
    """
    self._column_count = column_count
    self._column_headers = HeaderData(self, [''] * column_count)
    self._column_alignments = AlignmentMetaData(
        self, [self.default_alignment] * column_count)
    self._column_widths = PositiveIntegerMetaData(self, [0] * column_count)
    # Give the left and right paddings independent list objects: the
    # previous code passed the very same list to both wrappers, which
    # would couple them if the wrapper keeps a reference to its argument.
    padding = [self.default_padding] * column_count
    self._left_padding_widths = PositiveIntegerMetaData(self, list(padding))
    self._right_padding_widths = PositiveIntegerMetaData(self, list(padding))
Sets the column count of the table. This method is called to set the number of columns for the first time. Parameters ---------- column_count : int number of columns in the table
def get_random_label():
    """Return a random 15-character label of uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(15))
Get a random label string to use when clustering jobs.
def contains(self, rect):
    """Return True when *rect* lies entirely inside this rectangle.

    Arguments:
        rect (Rectangle): the candidate inner rectangle
    """
    fits_horizontally = (rect.x >= self.x and
                         rect.x + rect.width <= self.x + self.width)
    fits_vertically = (rect.y >= self.y and
                       rect.y + rect.height <= self.y + self.height)
    return fits_vertically and fits_horizontally
Tests if another rectangle is contained by this one Arguments: rect (Rectangle): The other rectangle Returns: bool: True if it is contained, False otherwise
def typedefs(
        self, name=None, function=None, header_dir=None, header_file=None,
        recursive=None, allow_empty=None):
    """Return the typedef declarations matching the given criteria."""
    return self._find_multiple(
        self._impl_matchers[scopedef_t.typedef],
        name=name,
        function=function,
        decl_type=self._impl_decl_types[scopedef_t.typedef],
        header_dir=header_dir,
        header_file=header_file,
        recursive=recursive,
        allow_empty=allow_empty)
returns a set of typedef declarations, that are matched defined criteria
def mustExposeRequest(self, service_request):
    """Decide whether the HTTP request is passed as the call's first argument.

    Granular: the service method's setting wins; otherwise fall back to
    the gateway-level setting (None there means False).

    @rtype: C{bool}
    """
    decision = service_request.service.mustExposeRequest(service_request)
    if decision is not None:
        return decision
    # The service had no opinion — use the gateway default.
    if self.expose_request is None:
        return False
    return self.expose_request
Decides whether the underlying http request should be exposed as the first argument to the method call. This is granular, looking at the service method first, then at the service level and finally checking the gateway. @rtype: C{bool}
def sample(self, num):
    """Return a new table with *num* rows randomly sampled.

    A boolean mask with *num* True entries is shuffled and applied, then
    the surviving rows are shuffled by sorting on a throwaway random
    column.  Sampling more rows than exist returns a copy of the table.
    """
    if num > len(self):
        return self.copy()
    if num < 0:
        raise IndexError("Cannot sample a negative number of rows "
                         "from a DataTable")
    row_mask = ([True] * num) + ([False] * (len(self) - num))
    shuffle(row_mask)
    sampled = self.mask(row_mask)
    # Pick a column name guaranteed not to collide with existing columns.
    rand_col = 'random_sorting_column'
    while rand_col in sampled:
        rand_col = '%030x' % randrange(16 ** 30)
    # BUG FIX: `xrange` is Python-2-only; `range` works on both.
    sampled[rand_col] = [random() for _ in range(len(sampled))]
    sampled.sort(rand_col, inplace=True)
    del sampled[rand_col]
    return sampled
Returns a new table with rows randomly sampled. We create a mask with `num` True bools, and fill it with False bools until it is the length of the table. We shuffle it, and apply that mask to the table.
def _GetClientLibCallback(args, client_func=_GetClientLib): client_paths = client_func( args.service, args.language, args.output, args.build_system, hostname=args.hostname, application_path=args.application) for client_path in client_paths: print 'API client library written to %s' % client_path
Generate discovery docs and client libraries to files. Args: args: An argparse.Namespace object to extract parameters from. client_func: A function that generates client libraries and stores them to files, accepting a list of service names, a client library language, an output directory, a build system for the client library language, and a hostname.
def _get_magnitude_term(self, C, mag): f_mag = C["c0"] + C["c1"] * mag if (mag > 4.5) and (mag <= 5.5): return f_mag + (C["c2"] * (mag - 4.5)) elif (mag > 5.5) and (mag <= 6.5): return f_mag + (C["c2"] * (mag - 4.5)) + (C["c3"] * (mag - 5.5)) elif mag > 6.5: return f_mag + (C["c2"] * (mag - 4.5)) + (C["c3"] * (mag - 5.5)) +\ (C["c4"] * (mag - 6.5)) else: return f_mag
Returns the magnitude scaling term defined in equation 2
def oauth_manager(self, oauth_manager):
    """Enable OAuth verification for the API via *oauth_manager*.

    Installs a before_request hook that resolves the scopes for the
    target resource and verifies the request, honouring the manager's
    after-request hooks and custom invalid-response handler.
    """
    @self.app.before_request
    def before_request():
        endpoint = request.endpoint
        resource = self.app.view_functions[endpoint].view_class
        # Default to OAuth being enabled when the resource does not
        # declare disable_oauth at all.
        if getattr(resource, 'disable_oauth', False):
            return
        scopes = request.args.get('scopes')
        if getattr(resource, 'schema', None):
            scopes = [self.build_scope(resource, request.method)]
        elif scopes:
            # BUG FIX: the original split the comma string here and then
            # unconditionally split again below, calling .split() on a
            # list and raising AttributeError.
            scopes = scopes.split(',')
        valid, req = oauth_manager.verify_request(scopes)
        for func in oauth_manager._after_request_funcs:
            valid, req = func(valid, req)
        if not valid:
            if oauth_manager._invalid_response:
                return oauth_manager._invalid_response(req)
            return abort(401)
        request.oauth = req
Use the oauth manager to enable oauth for API :param oauth_manager: the oauth manager
def _parse_ergodic_cutoff(self): ec_is_str = isinstance(self.ergodic_cutoff, str) if ec_is_str and self.ergodic_cutoff.lower() == 'on': if self.sliding_window: return 1.0 / self.lag_time else: return 1.0 elif ec_is_str and self.ergodic_cutoff.lower() == 'off': return 0.0 else: return self.ergodic_cutoff
Get a numeric value from the ergodic_cutoff input, which can be 'on' or 'off'.
def checkFuelPosition(obs, agent_host):
    """Move coal to inventory slot 0 if present in any of slots 1-38."""
    for slot in range(1, 39):
        key = 'InventorySlot_' + str(slot) + '_item'
        if key in obs and obs[key] == 'coal':
            agent_host.sendCommand("swapInventoryItems 0 " + str(slot))
            return
Make sure our coal, if we have any, is in slot 0.
def calc_file_md5(filepath, chunk_size=None):
    """Calculate a file's md5 checksum.

    :param filepath: path of the file to hash
    :param chunk_size: read size in bytes (default 256 KiB)
    :return: hex digest string
    """
    if chunk_size is None:
        chunk_size = 256 * 1024
    md5sum = hashlib.md5()
    # BUG FIX: open read-only ('rb') — the previous 'r+b' mode demanded
    # write permission just to hash the file.
    with io.open(filepath, 'rb') as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            md5sum.update(chunk)
    return md5sum.hexdigest()
Calculate a file's md5 checksum. Use the specified chunk_size for IO or the default 256KB :param filepath: :param chunk_size: :return:
async def get_kernel_options(cls) -> typing.Optional[str]:
    """Kernel boot parameters to pass by default, or None when unset/empty."""
    value = await cls.get_config("kernel_opts")
    if value is None or value == "":
        return None
    return value
Kernel options. Boot parameters to pass to the kernel by default.
def print_modules(self):
    """Render the module map as a text table (run order, build/remove flags, id)."""
    shutit_global.shutit_global_object.yield_to_draw()
    cfg = self.cfg
    lines = ['Modules: \n', ' Run order Build Remove Module ID\n']
    for module_id in self.module_ids():
        module = self.shutit_map[module_id]
        lines.append(' ' + str(module.run_order) + ' ' +
                     str(cfg[module_id]['shutit.core.module.build']) + ' ' +
                     str(cfg[module_id]['shutit.core.module.remove']) + ' ' +
                     module_id + '\n')
    return ''.join(lines)
Returns a string table representing the modules in the ShutIt module map.
def create_connection_model(service):
    """Build a model type that connects the services provided by *service*.

    One CharField column is created per provided sub-service; the new
    type is named after the connection service name of *service* itself.
    """
    bases = (BaseModel,)
    # NOTE(review): the original comprehension reused the name `service`
    # for the loop variable; using a distinct name here assumes Python 3
    # comprehension scoping (the outer `service` was never clobbered).
    columns = {model_service_name(provided): fields.CharField()
               for provided in service._services}
    return type(BaseModel)(connection_service_name(service), bases, columns)
Create an SQL Alchemy table that connects the provides services
def clear_genus_type(self):
    """Reset the genus type to its default value.

    raise: NoAccess - when the genus type metadata is read-only or
        required
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_genus_type_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['genusTypeId'] = self._genus_type_default
Clears the genus type. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def install_caller_instruction(self, token_type="Unrestricted",
                               transaction_id=None):
    """Install a 'Caller' payment instruction and persist its token.

    Writes the caller_token into the FPS config section (system-wide when
    possible, else the user config) and returns it.  Raises
    FPSResponseError on any non-200 response.
    """
    response = self.install_payment_instruction(
        "MyRole=='Caller';", token_type=token_type,
        transaction_id=transaction_id)
    body = response.read()
    if response.status != 200:
        raise FPSResponseError(response.status, response.reason, body)
    rs = ResultSet()
    xml.sax.parseString(body, handler.XmlHandler(rs, self))
    caller_token = rs.TokenId
    try:
        boto.config.save_system_option("FPS", "caller_token", caller_token)
    except IOError:
        # No permission for the system config — fall back to per-user.
        boto.config.save_user_option("FPS", "caller_token", caller_token)
    return caller_token
Set us up as a caller This will install a new caller_token into the FPS section. This should really only be called to regenerate the caller token.
def get_values(self, attr_name):
    """Unique attribute values seen at this node for *attr_name*.

    Merges the keys of the continuous-distribution map, the discrete
    count map and the branch map.
    """
    return (set(self._attr_value_cdist[attr_name])
            | set(self._attr_value_counts[attr_name])
            | set(self._branches))
Retrieves the unique set of values seen for the given attribute at this node.
def size(self):
    """Physical device size in mm as (width, height).

    Only meaningful for tablets, touchpads and touchscreens; for other
    devices libinput reports failure and this raises AssertionError.
    """
    width = c_double(0)
    height = c_double(0)
    status = self._libinput.libinput_device_get_size(
        self._handle, byref(width), byref(height))
    assert status == 0, 'This device does not provide size information'
    return width.value, height.value
The physical size of a device in mm, where meaningful. This property is only valid on devices with the required data, i.e. tablets, touchpads and touchscreens. For other devices this property raises :exc:`AssertionError`. Returns: (float, float): (Width, Height) in mm. Raises: AssertionError
def add_dataset(data_type, val, unit_id=None, metadata=None, name="",
                user_id=None, flush=False):
    """Add a single piece of data outside of any scenario.

    Deduplicates on the dataset hash: an existing dataset visible to the
    user is reused; otherwise a distinct copy (timestamped to change its
    hash) is stored.  A typical use is setting default values on types.

    :param metadata: optional metadata dict (defaults to empty)
    :param flush: when True, flush the DB session before returning
    """
    # BUG FIX: metadata previously defaulted to a mutable `{}` shared
    # across calls; use None and create a fresh dict per call.
    d = Dataset()
    d.type = data_type
    d.value = val
    d.set_metadata(metadata if metadata is not None else {})
    d.unit_id = unit_id
    d.name = name
    d.created_by = user_id
    d.hash = d.set_hash()
    try:
        existing = db.DBSession.query(Dataset).filter(
            Dataset.hash == d.hash).one()
        if existing.check_user(user_id):
            d = existing
        else:
            # Same content but not visible to this user: store a distinct
            # copy whose hash differs via the creation timestamp.
            d.set_metadata({'created_at': datetime.datetime.now()})
            d.set_hash()
            db.DBSession.add(d)
    except NoResultFound:
        db.DBSession.add(d)
    if flush:
        db.DBSession.flush()
    return d
Data can exist without scenarios. This is the mechanism whereby single pieces of data can be added without doing it through a scenario. A typical use of this would be for setting default values on types.
def _copy_binder_notebooks(app):
    """Copy generated Jupyter notebooks into the binder notebooks directory.

    Mirrors each output gallery directory structure, keeping only the
    notebook files (non-ipynb files are filtered out during the copy).
    """
    gallery_conf = app.config.sphinx_gallery_conf
    gallery_dirs = gallery_conf.get('gallery_dirs')
    binder_conf = gallery_conf.get('binder')
    notebooks_dir = os.path.join(app.outdir, binder_conf.get('notebooks_dir'))
    # Start from a clean target directory every build.
    shutil.rmtree(notebooks_dir, ignore_errors=True)
    os.makedirs(notebooks_dir)
    if not isinstance(gallery_dirs, (list, tuple)):
        gallery_dirs = [gallery_dirs]
    iterator = sphinx_compatibility.status_iterator(
        gallery_dirs, 'copying binder notebooks...', length=len(gallery_dirs))
    for folder in iterator:
        shutil.copytree(os.path.join(app.srcdir, folder),
                        os.path.join(notebooks_dir, folder),
                        ignore=_remove_ipynb_files)
Copy Jupyter notebooks to the binder notebooks directory. Copy each output gallery directory structure but only including the Jupyter notebook files.
def on(self, state):
    """Turn on or off.

    :param state: True (on) or False (off).
    """
    self._on = state
    # Build the off command first, replacing it with on when requested
    # (mirrors the original call order on the command set).
    command = self.command_set.off()
    if state:
        command = self.command_set.on()
    self.send(command)
Turn on or off. :param state: True (on) or False (off).
def destroyCommit(self, varBind, **context):
    """Commit phase of Managed Object Instance destruction.

    Removes the instance from the MIB tree, stashing the removed object
    in the per-request `instances` store so a subsequent undo phase can
    restore it, then hands the result to `cbFun`.
    """
    name, val = varBind
    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: destroyCommit(%s, %r)' % (self, name, val)))
    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']
    try:
        # Keep the removed instance keyed for this request index so that
        # an undo step can put it back; a missing instance is fine.
        instances[self.ST_DESTROY][-idx - 1] = self._vars.pop(name)
    except KeyError:
        pass
    cbFun = context['cbFun']
    cbFun(varBind, **context)
Destroy Managed Object Instance. Implements the second of the multi-step workflow similar to the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the second phase is to actually remove requested Managed Object Instance from the MIB tree. When multiple Managed Objects Instances are destroyed/modified at once (likely coming all in one SNMP PDU), each of them has to run through the second (*commit*) phase successfully for the system to transition to the third (*cleanup*) phase. If any single *commit* step fails, the system transitions into the *undo* state for each of Managed Objects Instances being processed at once. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to destroy Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. * `instances` (dict): user-supplied dict for temporarily holding Managed Objects Instances being destroyed. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object.
def get_hessian(self):
    """Reconstruct the symmetric (3N, 3N) hessian.

    Unpacks the lower-triangular 'Cartesian Force Constants' field into a
    full symmetric matrix; returns None when the field is absent.
    """
    packed = self.fields.get("Cartesian Force Constants")
    if packed is None:
        return None
    size = 3 * len(self.molecule.numbers)
    hessian = np.zeros((size, size), float)
    offset = 0
    for row in range(size):
        # Row `row` of the packed triangle holds row+1 values; mirror
        # them into both the row and the column to keep symmetry.
        hessian[row, :row + 1] = packed[offset:offset + row + 1]
        hessian[:row + 1, row] = packed[offset:offset + row + 1]
        offset += row + 1
    return hessian
Return the hessian
def _build_params_from_kwargs(self, **kwargs): api_methods = self.get_api_params() required_methods = self.get_api_required_params() ret_kwargs = {} for key, val in kwargs.items(): if key not in api_methods: warnings.warn( 'Passed uknown parameter [{}]'.format(key), Warning ) continue if key not in required_methods and val is None: continue if type(val) != api_methods[key]['type']: raise ValueError( "Invalid type specified to param: {}".format(key) ) if 'max_len' in api_methods[key]: if len(val) > api_methods[key]['max_len']: raise ValueError( "Lenght of parameter [{}] more than " "allowed length".format(key) ) ret_kwargs[api_methods[key]['param']] = val for item in required_methods: if item not in ret_kwargs: raise pushalot.exc.PushalotException( "Parameter [{}] required, but not set".format(item) ) return ret_kwargs
Builds parameters from passed arguments Search passed parameters in available methods, prepend specified API key, and return dictionary which can be sent directly to API server. :param kwargs: :type param: dict :raises ValueError: If type of specified parameter doesn't match the expected type. Also raised if some basic validation of passed parameter fails. :raises PushalotException: If required parameter not set. :return: Dictionary with params which can be sent to API server :rtype: dict
def beginning_of_history(self):
    """Move to the first line in the history."""
    self.history_cursor = 0
    if len(self.history) > 0:
        self.l_buffer = self.history[0]
Move to the first line in the history.
def _validate_sleep(minutes):
    """Validate the minutes parameter for sleep settings.

    Accepts an integer between 1 and 180, or the strings "Never"/"Off"
    (case-insensitive). ``False`` is treated as "Never" because the Salt
    CLI converts "Off" to boolean ``False``; ``True`` is rejected.

    Returns the value to be passed to the command.

    Raises SaltInvocationError for any other input.
    """
    if isinstance(minutes, six.string_types):
        if minutes.lower() not in ('never', 'off'):
            raise SaltInvocationError(
                'Invalid String Value for Minutes.\n'
                'String values must be "Never" or "Off".\n'
                'Passed: {0}'.format(minutes)
            )
        return 'Never'
    # bool must be checked before int: bool is a subclass of int.
    if isinstance(minutes, bool):
        if minutes:
            raise SaltInvocationError(
                'Invalid Boolean Value for Minutes.\n'
                'Boolean value "On" or "True" is not allowed.\n'
                'Salt CLI converts "On" to boolean True.\n'
                'Passed: {0}'.format(minutes)
            )
        return 'Never'
    if isinstance(minutes, int):
        if not 1 <= minutes <= 180:
            raise SaltInvocationError(
                'Invalid Integer Value for Minutes.\n'
                'Integer values must be between 1 and 180.\n'
                'Passed: {0}'.format(minutes)
            )
        return minutes
    raise SaltInvocationError(
        'Unknown Variable Type Passed for Minutes.\n'
        'Passed: {0}'.format(minutes)
    )
Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command
def delete_group_cached(group_id, broker=None):
    """Delete a task group's results and key list from the cache backend.

    :param group_id: identifier of the group to delete
    :param broker: broker instance; a default broker is used when omitted
    """
    broker = broker or get_broker()
    key_list_name = '{}:{}:keys'.format(broker.list_key, group_id)
    # The key list holds the cache keys of every result in the group.
    result_keys = broker.cache.get(key_list_name)
    broker.cache.delete_many(result_keys)
    broker.cache.delete(key_list_name)
Delete a group from the cache backend
def LoadServerCertificate(self, server_certificate=None, ca_certificate=None):
    """Loads and verifies the server certificate.

    Verifies the server certificate against the CA public key, enforces a
    monotonically increasing serial number (rollback protection), and on
    success caches the server identity and public key on this instance.

    Args:
      server_certificate: The server's certificate object.
      ca_certificate: The CA certificate used to verify the server cert.

    Raises:
      IOError: If the certificate fails verification or its serial number
        is older than the one recorded in the client config.
    """
    try:
        server_certificate.Verify(ca_certificate.GetPublicKey())
    except rdf_crypto.VerificationError as e:
        # Forget the server identity on a bad certificate.
        self.server_name = None
        raise IOError("Server cert is invalid: %s" % e)
    # Rollback protection: refuse server certs with a serial older than
    # the last one this client has seen.
    server_cert_serial = server_certificate.GetSerialNumber()
    if server_cert_serial < config.CONFIG["Client.server_serial_number"]:
        raise IOError("Server certificate serial number is too old.")
    elif server_cert_serial > config.CONFIG["Client.server_serial_number"]:
        logging.info("Server serial number updated to %s", server_cert_serial)
        config.CONFIG.Set("Client.server_serial_number", server_cert_serial)
        # Persist the new floor so it survives client restarts.
        config.CONFIG.Write()
    self.server_name = server_certificate.GetCN()
    self.server_certificate = server_certificate
    self.ca_certificate = ca_certificate
    self.server_public_key = server_certificate.GetPublicKey()
    # Any cached ciphers were negotiated against the old server key.
    self._ClearServerCipherCache()
Loads and verifies the server certificate.
def IsDirectory(self):
    """Determine whether the file entry is a directory.

    Returns:
      bool: True if the file entry is a directory.
    """
    # Stat lazily and cache the stat object on the instance.
    stat_object = self._stat_object
    if stat_object is None:
        stat_object = self._GetStat()
        self._stat_object = stat_object
    if stat_object is not None:
        self.entry_type = stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY
Determines if the file entry is a directory. Returns: bool: True if the file entry is a directory.
def base_id(self):
    """Fetches the Base ID from the transmitter if required, otherwise
    returns the currently set (cached) Base ID.

    Sends a COMMON_COMMAND with data [0x08] (presumably the read-base-ID
    command -- confirm against the radio protocol spec) and polls the
    receive queue for a matching RESPONSE packet. The result is cached in
    self._base_id, so the radio is queried at most once.
    """
    # Return the cached value when we already know the Base ID.
    if self._base_id is not None:
        return self._base_id

    self.send(Packet(PACKET.COMMON_COMMAND, data=[0x08]))
    # Poll the receive queue for up to ~1 second (10 x 0.1 s timeouts).
    for i in range(0, 10):
        try:
            packet = self.receive.get(block=True, timeout=0.1)
            # A successful answer is a RESPONSE packet with an OK return
            # code and a 4-byte Base ID payload.
            if packet.packet_type == PACKET.RESPONSE and packet.response == RETURN_CODE.OK and len(packet.response_data) == 4:
                self._base_id = packet.response_data
                # Put the packet back so other consumers still see it.
                self.receive.put(packet)
                break
            # Not the packet we wanted; return it to the queue.
            self.receive.put(packet)
        except queue.Empty:
            continue
    # May still be None when no valid response arrived in time.
    return self._base_id
Fetches Base ID from the transmitter, if required. Otherwise returns the currently set Base ID.
def writer(f):
    """CSV writer factory for the CADA format (UTF-8, comma-delimited,
    double-quote quoting)."""
    options = {'encoding': 'utf-8', 'delimiter': b',', 'quotechar': b'"'}
    return unicodecsv.writer(f, **options)
CSV writer factory for CADA format
def get_day_of_month(datestring):
    """Extract the day of the month from a string, zero-padded.

    Transforms an ordinal or plain day number into a two-digit day,
    e.g. "3rd" -> "03", "12th" -> "12".

    :param datestring: a string possibly containing a day number
    :returns: the zero-padded day as a string, or None when no day
        number is found (the original returned the string "None" here,
        contradicting its documentation)
    """
    match = re.search(r"\d{1,2}(st|nd|rd|th)?", datestring, re.IGNORECASE)
    if match is None:
        return None
    day = match.group()
    # Strip an ordinal suffix when the capture group matched; the old
    # check used the character class [st|nd|rd|th], which matches any
    # single letter from that set rather than the suffixes themselves.
    if match.group(1):
        day = day[:-2]
    # Pad single-digit days with a leading zero.
    return day.zfill(2)
Transforms an ordinal number into plain number with padding zero. E.g. 3rd -> 03, or 12th -> 12 Keyword arguments: datestring -- a string Returns: String, or None if the transformation fails
def initialize(**kwargs):
    """Loads the globally shared YAML configuration.

    Accepts either a ``config`` dict of loader options or a plain filename
    string (shorthand for ``{'config_filename': ...}``). An optional
    ``environment`` kwarg is forwarded to the loader; when ``name`` is
    given, that sub-section of the config is overlaid on top.
    """
    global config
    config_opts = kwargs.setdefault('config',{})
    # Allow passing a bare filename instead of an options dict.
    # NOTE: `basestring` means this module targets Python 2.
    if isinstance(config_opts,basestring):
        config_opts = {'config_filename':config_opts}
        kwargs['config'] = config_opts
    if 'environment' in kwargs:
        config_opts['environment'] = kwargs['environment']
    config.load_config(**config_opts)
    # Overlay the named sub-config (if any), then the application config.
    if kwargs.get('name'):
        subconfig = config.get(kwargs.get('name'),{})
        config.overlay_add(subconfig)
    config.overlay_add(app_config)
Loads the globally shared YAML configuration
def get_asset_content_lookup_session_for_repository(self, repository_id=None):
    """Gets the ``OsidSession`` for the asset content lookup service of
    the given repository, wrapping the provider's session.

    arg:    repository_id (osid.id.Id): the ``Id`` of the repository
    return: (osid.repository.AssetLookupSession) - the new session
    """
    provider_session = (
        self._provider_manager
            .get_asset_content_lookup_session_for_repository(repository_id))
    return AssetContentLookupSession(provider_session, self._config_map)
Gets the ``OsidSession`` associated with the asset content lookup service for the given repository. arg: repository_id (osid.id.Id): the ``Id`` of the repository return: (osid.repository.AssetLookupSession) - the new ``AssetLookupSession`` raise: NotFound - ``repository_id`` not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_asset_lookup()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_lookup()`` and ``supports_visible_federation()`` are ``true``.*
def get_non_magic_cols(self):
    """Find all columns in self.df that are not recognized MagIC 3 columns.

    Returns
    -------
    unrecognized_cols : set
        Column names present in ``self.df`` but absent from the data
        model for this table type.
    """
    approved = set(self.data_model.dm[self.dtype].index)
    return set(self.df.columns) - approved
Find all columns in self.df that are not real MagIC 3 columns. Returns -------- unrecognized_cols : set
def _process_dimension_kwargs(direction, kwargs):
    """Process kwargs for AxDimension instances by stripping the prefix
    for the appropriate direction.

    :param direction: axis prefix (e.g. 'x' or 'y') to strip from keys
    :param kwargs: raw keyword arguments
    :returns: dict containing only the accepted keys ('unit', 'pad',
        'lim', 'label'), with the direction prefix removed
    """
    acceptable_keys = ['unit', 'pad', 'lim', 'label']
    processed_kwargs = {}
    for key, value in kwargs.items():
        if key.startswith(direction):
            # Remove the prefix exactly once. The previous
            # str.lstrip(direction) stripped any run of the prefix's
            # *characters*, mangling keys such as 'latlim' for
            # direction 'lat' ('latlim' -> 'im').
            processed_key = key[len(direction):]
        else:
            processed_key = key
        if processed_key in acceptable_keys:
            processed_kwargs[processed_key] = value
    return processed_kwargs
process kwargs for AxDimension instances by stripping off the prefix for the appropriate direction
def execute(self, operation, parameters=None, job_id=None):
    """Prepare and execute a BigQuery query, blocking until it completes.

    :type operation: str
    :param operation: A Google BigQuery query string (standard SQL).
    :type parameters: Mapping[str, Any] or Sequence[Any]
    :param parameters: (Optional) mapping or sequence of query parameter
        values; converted to BigQuery structured query parameters.
    :type job_id: str
    :param job_id: (Optional) The job_id to use. If not set, a job ID
        is generated at random.
    """
    # Reset any state from a previous execution on this cursor.
    self._query_data = None
    self._query_job = None
    client = self.connection._client
    # Substitute placeholders into the SQL text and build the matching
    # structured query parameters.
    formatted_operation = _format_operation(operation, parameters=parameters)
    query_parameters = _helpers.to_query_parameters(parameters)
    config = job.QueryJobConfig()
    config.query_parameters = query_parameters
    config.use_legacy_sql = False
    self._query_job = client.query(
        formatted_operation, job_config=config, job_id=job_id
    )
    # Wait for the query to finish; translate API errors into DB-API
    # DatabaseError as required by PEP 249.
    try:
        self._query_job.result()
    except google.cloud.exceptions.GoogleCloudError as exc:
        raise exceptions.DatabaseError(exc)
    # NOTE(review): reaches into the private _query_results attribute of
    # the job -- tied to this client-library version.
    query_results = self._query_job._query_results
    self._set_rowcount(query_results)
    self._set_description(query_results.schema)
Prepare and execute a database operation. .. note:: When setting query parameters, values which are "text" (``unicode`` in Python2, ``str`` in Python3) will use the 'STRING' BigQuery type. Values which are "bytes" (``str`` in Python2, ``bytes`` in Python3), will use using the 'BYTES' type. A `~datetime.datetime` parameter without timezone information uses the 'DATETIME' BigQuery type (example: Global Pi Day Celebration March 14, 2017 at 1:59pm). A `~datetime.datetime` parameter with timezone information uses the 'TIMESTAMP' BigQuery type (example: a wedding on April 29, 2011 at 11am, British Summer Time). For more information about BigQuery data types, see: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types ``STRUCT``/``RECORD`` and ``REPEATED`` query parameters are not yet supported. See: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524 :type operation: str :param operation: A Google BigQuery query string. :type parameters: Mapping[str, Any] or Sequence[Any] :param parameters: (Optional) dictionary or sequence of parameter values. :type job_id: str :param job_id: (Optional) The job_id to use. If not set, a job ID is generated at random.
def MGMT_ANNOUNCE_BEGIN(self, sAddr, xCommissionerSessionId, listChannelMask, xCount, xPeriod): print '%s call MGMT_ANNOUNCE_BEGIN' % self.port channelMask = '' channelMask = self.__ChannelMaskListToStr(listChannelMask) try: cmd = WPANCTL_CMD + 'commissioner announce-begin %s %s %s %s' % (channelMask, xCount, xPeriod, sAddr) print cmd return self.__sendCommand(cmd) != 'Fail' except Exception, e: ModuleHelper.WriteIntoDebugLogger('MGMT_ANNOUNCE_BEGIN() error: ' + str(e))
send MGMT_ANNOUNCE_BEGIN message to a given destination Returns: True: successful to send MGMT_ANNOUNCE_BEGIN message. False: fail to send MGMT_ANNOUNCE_BEGIN message.
def handle_joined(self, connection, event):
    """Store join times for the nicknames present when we first join."""
    # The last event argument is the space-separated nickname list.
    name_list = event.arguments()[-1].split()
    for name in name_list:
        # Drop IRC mode prefixes (@ for ops, + for voiced).
        self.joined[name.lstrip("@+")] = datetime.now()
Store join times for current nicknames when we first join.
def describe_snapshots(self, *snapshot_ids):
    """Describe available snapshots.

    Builds the numbered SnapshotId.N parameters, submits the query and
    returns a Deferred firing with the parsed snapshot list.

    TODO: ownerSet, restorableBySet
    """
    params = {}
    for index, snapshot_id in enumerate(snapshot_ids, start=1):
        params["SnapshotId.%d" % index] = snapshot_id
    query = self.query_factory(
        action="DescribeSnapshots", creds=self.creds,
        endpoint=self.endpoint, other_params=params)
    deferred = query.submit()
    return deferred.addCallback(self.parser.snapshots)
Describe available snapshots. TODO: ownerSet, restorableBySet
def upgradedb(options):
    """Add 'fake' data migrations for existing tables from legacy GeoNode
    versions.

    Marks the initial migrations for the 'maps' and 'avatar' apps as
    already applied so upgrades from GeoNode 1.1/1.2 do not try to
    recreate existing tables.
    """
    # NOTE: print statements (not functions) -- this module is Python 2.
    version = options.get('version')
    if version in ['1.1', '1.2']:
        sh("python manage.py migrate maps 0001 --fake")
        sh("python manage.py migrate avatar 0001 --fake")
    elif version is None:
        print "Please specify your GeoNode version"
    else:
        print "Upgrades from version %s are not yet supported." % version
Add 'fake' data migrations for existing tables from legacy GeoNode versions
def answer_shipping_query(self, shipping_query_id, ok, shipping_options=None, error_message=None):
    """Reply to a shipping query (sent for invoices with is_flexible set).

    https://core.telegram.org/bots/api#answershippingquery

    :param shipping_query_id: Unique identifier for the query to be answered
    :type  shipping_query_id: str|unicode
    :param ok: True when delivery to the specified address is possible
    :type  ok: bool
    :param shipping_options: Required if ok is True; available shipping options
    :param error_message: Required if ok is False; human-readable reason
    :return: On success, True is returned
    :rtype: bool
    """
    # Imported for the ShippingOption type used in shipping_options;
    # the name itself is not referenced directly below.
    from pytgbot.api_types.sendable.payments import ShippingOption
    # Validate argument types before hitting the API.
    assert_type_or_raise(shipping_query_id, unicode_type, parameter_name="shipping_query_id")
    assert_type_or_raise(ok, bool, parameter_name="ok")
    assert_type_or_raise(shipping_options, None, list, parameter_name="shipping_options")
    assert_type_or_raise(error_message, None, unicode_type, parameter_name="error_message")
    result = self.do("answerShippingQuery", shipping_query_id=shipping_query_id, ok=ok, shipping_options=shipping_options, error_message=error_message)
    if self.return_python_objects:
        logger.debug("Trying to parse {data}".format(data=repr(result)))
        try:
            # The API returns a bare boolean on success.
            return from_array_list(bool, result, list_level=0, is_builtin=True)
        except TgApiParseException:
            logger.debug("Failed parsing as primitive bool", exc_info=True)
            # Nothing parsed the result; re-raise as a parse failure.
            raise TgApiParseException("Could not parse result.")
    # Raw API result when python-object conversion is disabled.
    return result
If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Use this method to reply to shipping queries. On success, True is returned. https://core.telegram.org/bots/api#answershippingquery Parameters: :param shipping_query_id: Unique identifier for the query to be answered :type shipping_query_id: str|unicode :param ok: Specify True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible) :type ok: bool Optional keyword parameters: :param shipping_options: Required if ok is True. A JSON-serialized array of available shipping options. :type shipping_options: list of pytgbot.api_types.sendable.payments.ShippingOption :param error_message: Required if ok is False. Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable'). Telegram will display this message to the user. :type error_message: str|unicode Returns: :return: On success, True is returned :rtype: bool
def vars_args(parser):
    """Add command line options for passing external template variables.

    :param parser: argparse-compatible parser to extend
    """
    parser.add_argument(
        '--extra-vars', dest='extra_vars', type=str, action='append',
        default=[], help='Extra template variables')
    parser.add_argument(
        '--extra-vars-file', dest='extra_vars_file', type=str,
        action='append', default=[], help='YAML files full of variables')
Add various command line options for external vars
async def get_power_parameters(self):
    """Get the power parameters for this node."""
    return await self._handler.power_parameters(system_id=self.system_id)
Get the power parameters for this node.
def __PrintAdditionalImports(self, imports):
    """Print additional imports needed for protorpc.

    Non-google imports are printed first, google imports last; each
    non-empty group is sorted and followed by a blank line.
    """
    google_imports = sorted(entry for entry in imports if 'google' in entry)
    other_imports = sorted(entry for entry in imports if 'google' not in entry)
    for group in (other_imports, google_imports):
        if not group:
            continue
        for import_ in group:
            self.__printer(import_)
        self.__printer()
Print additional imports needed for protorpc.
def collectLocations(self):
    """Return a list of Location objects, one for each key in this mapping.

    Note: only the keys are used; the (value, deltaName) entries are
    ignored. The previous docstring incorrectly claimed a dictionary was
    returned.
    """
    return [Location(key) for key, _ in self.items()]
Return a list of Location objects, one per key in this mapping.
def createbranch(self, project_id, branch, ref):
    """Create branch from commit SHA or existing branch

    :param project_id: The ID of a project
    :param branch: The name of the branch
    :param ref: Create branch from commit SHA or existing branch
    :return: the new branch's JSON data on success, False if not
    """
    payload = {"id": project_id, "branch_name": branch, "ref": ref}
    url = '{0}/{1}/repository/branches'.format(self.projects_url, project_id)
    response = requests.post(
        url, headers=self.headers, data=payload, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    # 201 Created is the only success status for this endpoint.
    if response.status_code != 201:
        return False
    return response.json()
Create branch from commit SHA or existing branch :param project_id: The ID of a project :param branch: The name of the branch :param ref: Create branch from commit SHA or existing branch :return: the created branch's JSON data on success, False if not
def _get_roles_for_request(request, application):
    """Return the set of roles the requesting user has on the application.

    Admin users additionally receive 'is_admin' and 'is_authorised'.
    """
    role_set = application.get_roles_for_person(request.user)
    if common.is_admin(request):
        role_set.update({"is_admin", "is_authorised"})
    return role_set
Check the authentication of the current user.
def msw(self):
    """Return a generator of tokens that map to more than one sense."""
    return (token for token, senses in self.tcmap().items() if len(senses) > 1)
Return a generator of tokens with more than one sense.
def make_executable(script_path):
    """Add the owner-execute bit to `script_path`, keeping existing bits.

    :param script_path: The file to change
    """
    current_mode = os.stat(script_path).st_mode
    os.chmod(script_path, current_mode | stat.S_IEXEC)
Make `script_path` executable. :param script_path: The file to change
def do(cmdline, runas=None, env=None):
    """Execute a ruby command with rbenv's shims from the user or the system.

    CLI Example:

    .. code-block:: bash

        salt '*' rbenv.do 'gem list bundler'
        salt '*' rbenv.do 'gem list bundler' deploy

    Returns the command's stdout on success, False on a non-zero exit.
    """
    if not cmdline:
        raise SaltInvocationError('Command must be specified')

    path = _rbenv_path(runas)
    if not env:
        env = {}

    # Prepend the rbenv shims directory so shimmed commands resolve first.
    env[str('PATH')] = salt.utils.stringutils.to_str(
        os.pathsep.join((
            salt.utils.path.join(path, 'shims'),
            salt.utils.stringutils.to_unicode(os.environ['PATH'])
        ))
    )

    try:
        cmdline = salt.utils.args.shlex_split(cmdline)
    except AttributeError:
        # BUGFIX: this branch previously assigned to an unused name
        # ('cmdauth'), leaving 'cmdline' as an unsplit string.
        cmdline = salt.utils.args.shlex_split(six.text_type(cmdline))

    result = __salt__['cmd.run_all'](
        cmdline,
        runas=runas,
        env=env,
        python_shell=False
    )

    if result['retcode'] == 0:
        # Refresh shims so newly installed executables become visible.
        rehash(runas=runas)
        return result['stdout']
    else:
        return False
Execute a ruby command with rbenv's shims from the user or the system CLI Example: .. code-block:: bash salt '*' rbenv.do 'gem list bundler' salt '*' rbenv.do 'gem list bundler' deploy
def _WsdlHasMethod(self, method_name):
    """Determine if the wsdl contains a method.

    Args:
      method_name: The name of the method to search.

    Returns:
      True if the method is in the WSDL, otherwise False.
    """
    first_port = self.suds_client.wsdl.services[0].ports[0]
    return method_name in first_port.methods
Determine if the wsdl contains a method. Args: method_name: The name of the method to search. Returns: True if the method is in the WSDL, otherwise False.
def adjust_white_for_scc(cls, rgb_p, rgb_b, rgb_w, p):
    """Adjust the white point for simultaneous chromatic contrast.

    :param rgb_p: Cone signals of proxima field.
    :param rgb_b: Cone signals of background.
    :param rgb_w: Cone signals of reference white.
    :param p: Simultaneous contrast/assimilation parameter.
    :return: Adjusted cone signals for reference white.
    """
    p_rgb = rgb_p / rgb_b
    numerator = ((1 - p) * p_rgb + (1 + p) / p_rgb) ** 0.5
    denominator = ((1 + p) * p_rgb + (1 - p) / p_rgb) ** 0.5
    return rgb_w * (numerator / denominator)
Adjust the white point for simultaneous chromatic contrast. :param rgb_p: Cone signals of proxima field. :param rgb_b: Cone signals of background. :param rgb_w: Cone signals of reference white. :param p: Simultaneous contrast/assimilation parameter. :return: Adjusted cone signals for reference white.
def ConvertToTemplate(server,template,password=None,alias=None):
    """Converts an existing server into a template.

    http://www.centurylinkcloud.com/api-docs/v1/#server-convert-server-to-template

    :param server: source server to convert
    :param template: name of destination template
    :param password: source server password (looked up when None)
    :param alias: short code for a particular account (account default
        alias when None)
    """
    if alias is None:
        alias = clc.v1.Account.GetAlias()
    if password is None:
        password = clc.v1.Server.GetCredentials([server,],alias)[0]['Password']
    payload = {
        'AccountAlias': alias,
        'Name': server,
        'Password': password,
        'TemplateAlias': template,
    }
    return clc.v1.API.Call('post','Server/ConvertServerToTemplate',payload)
Converts an existing server into a template. http://www.centurylinkcloud.com/api-docs/v1/#server-convert-server-to-template :param server: source server to convert :param template: name of destination template :param password: source server password (optional - will lookup password if None) :param alias: short code for a particular account. If none will use account's default alias
def run(self):
    """Run each middleware function on the collected files.

    Returns the (possibly mutated) files mapping and merges it into
    ``self.files``.
    """
    files = self.get_files()
    for middleware_func in self.middleware:
        middleware_func(files, self)
    self.files.update(files)
    return files
Run each middleware function on files
def save(self, out, kind=None, **kw):
    """Serialize the QR Code to ``out`` in one of the supported formats.

    The format is derived from the filename extension of ``out`` unless
    ``kind`` names it explicitly; all other keywords are forwarded to the
    chosen serializer.
    """
    writers.save(self.matrix, self._version, out, kind, **kw)
\ Serializes the QR Code in one of the supported formats. The serialization format depends on the filename extension. **Common keywords** ========== ============================================================== Name Description ========== ============================================================== scale Integer or float indicating the size of a single module. Default: 1. The interpretation of the scaling factor depends on the serializer. For pixel-based output (like PNG) the scaling factor is interepreted as pixel-size (1 = 1 pixel). EPS interprets ``1`` as 1 point (1/72 inch) per module. Some serializers (like SVG) accept float values. If the serializer does not accept float values, the value will be converted to an integer value (note: int(1.6) == 1). border Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes). color A string or tuple representing a color value for the dark modules. The default value is "black". The color can be provided as ``(R, G, B)`` tuple, as web color name (like "red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``). Some serializers (SVG and PNG) accept an alpha transparency value like ``#RRGGBBAA``. background A string or tuple representing a color for the light modules or background. See "color" for valid values. The default value depends on the serializer. SVG uses no background color (``None``) by default, other serializers use "white" as default background color. ========== ============================================================== **Scalable Vector Graphics (SVG)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.BytesIO kind "svg" or "svgz" (to create a gzip compressed SVG) scale integer or float color Default: "#000" (black) ``None`` is a valid value. 
If set to ``None``, the resulting path won't have a "stroke" attribute. The "stroke" attribute may be defined via CSS (external). If an alpha channel is defined, the output depends of the used SVG version. For SVG versions >= 2.0, the "stroke" attribute will have a value like "rgba(R, G, B, A)", otherwise the path gets another attribute "stroke-opacity" to emulate the alpha channel. To minimize the document size, the SVG serializer uses automatically the shortest color representation: If a value like "#000000" is provided, the resulting document will have a color value of "#000". If the color is "#FF0000", the resulting color is not "#F00", but the web color name "red". background Default value ``None``. If this paramater is set to another value, the resulting image will have another path which is used to define the background color. If an alpha channel is used, the resulting path may have a "fill-opacity" attribute (for SVG version < 2.0) or the "fill" attribute has a "rgba(R, G, B, A)" value. See keyword "color" for further details. xmldecl Boolean value (default: ``True``) indicating whether the document should have an XML declaration header. Set to ``False`` to omit the header. svgns Boolean value (default: ``True``) indicating whether the document should have an explicit SVG namespace declaration. Set to ``False`` to omit the namespace declaration. The latter might be useful if the document should be embedded into a HTML 5 document where the SVG namespace is implicitly defined. title String (default: ``None``) Optional title of the generated SVG document. desc String (default: ``None``) Optional description of the generated SVG document. svgid A string indicating the ID of the SVG document (if set to ``None`` (default), the SVG element won't have an ID). svgclass Default: "segno". The CSS class of the SVG document (if set to ``None``, the SVG element won't have a class). lineclass Default: "qrline". 
The CSS class of the path element (which draws the dark modules (if set to ``None``, the path won't have a class). omitsize Indicates if width and height attributes should be omitted (default: ``False``). If these attributes are omitted, a ``viewBox`` attribute will be added to the document. unit Default: ``None`` Inidctaes the unit for width / height and other coordinates. By default, the unit is unspecified and all values are in the user space. Valid values: em, ex, px, pt, pc, cm, mm, in, and percentages (any string is accepted, this parameter is not validated by the serializer) encoding Encoding of the XML document. "utf-8" by default. svgversion SVG version (default: ``None``). If specified (a float), the resulting document has an explicit "version" attribute. If set to ``None``, the document won't have a "version" attribute. This parameter is not validated. compresslevel Default: 9. This parameter is only valid, if a compressed SVG document should be created (file extension "svgz"). 1 is fastest and produces the least compression, 9 is slowest and produces the most. 0 is no compression. ============= ============================================================== **Portable Network Graphics (PNG)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.BytesIO kind "png" scale integer color Default: "#000" (black) ``None`` is a valid value iff background is not ``None``. background Default value ``#fff`` (white) See keyword "color" for further details. compresslevel Default: 9. Integer indicating the compression level for the ``IDAT`` (data) chunk. 1 is fastest and produces the least compression, 9 is slowest and produces the most. 0 is no compression. dpi Default: None. Specifies the DPI value for the image. By default, the DPI value is unspecified. 
Please note that the DPI value is converted into meters (maybe with rounding errors) since PNG does not support the unit "dots per inch". addad Boolean value (default: True) to (dis-)allow a "Software" comment indicating that the file was created by Segno. ============= ============================================================== **Encapsulated PostScript (EPS)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.StringIO kind "eps" scale integer or float color Default: "#000" (black) background Default value: ``None`` (no background) ============= ============================================================== **Portable Document Format (PDF)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.BytesIO kind "pdf" scale integer or float compresslevel Default: 9. Integer indicating the compression level. 1 is fastest and produces the least compression, 9 is slowest and produces the most. 0 is no compression. ============= ============================================================== **Text (TXT)** Does not support the "scale" keyword! ============= ============================================================== Name Description ============= ============================================================== out Filename or io.StringIO kind "txt" color Default: "1" background Default: "0" ============= ============================================================== **ANSI escape code** Supports the "border" keyword, only! 
============= ============================================================== Name Description ============= ============================================================== kind "ans" ============= ============================================================== **Portable Bitmap (PBM)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.BytesIO kind "pbm" scale integer plain Default: False. Boolean to switch between the P4 and P1 format. If set to ``True``, the (outdated) P1 serialization format is used. ============= ============================================================== **Portable Arbitrary Map (PAM)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.BytesIO kind "pam" scale integer color Default: "#000" (black). background Default value ``#fff`` (white). Use ``None`` for a transparent background. ============= ============================================================== **LaTeX / PGF/TikZ** To use the output of this serializer, the ``PGF/TikZ`` (and optionally ``hyperref``) package is required in the LaTeX environment. The serializer itself does not depend on any external packages. ============= ============================================================== Name Description ============= ============================================================== out Filename or io.StringIO kind "tex" scale integer or float color LaTeX color name (default: "black"). The color is written "at it is", so ensure that the color is a standard color or it has been defined in the enclosing LaTeX document. url Default: ``None``. Optional URL where the QR Code should point to. Requires the ``hyperref`` package in your LaTeX environment. 
============= ============================================================== **X BitMap (XBM)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.StringIO kind "xbm" scale integer name Name of the variable (default: "img") ============= ============================================================== **X PixMap (XPM)** ============= ============================================================== Name Description ============= ============================================================== out Filename or io.StringIO kind "xpm" scale integer color Default: "#000" (black). background Default value ``#fff`` (white) ``None`` indicates a transparent background. name Name of the variable (default: "img") ============= ============================================================== :param out: A filename or a writable file-like object with a ``name`` attribute. Use the `kind` parameter if `out` is a :py:class:`io.ByteIO` or :py:class:`io.StringIO` stream which don't have a ``name`` attribute. :param kind: If the desired output format cannot be determined from the ``out`` parameter, this parameter can be used to indicate the serialization format (i.e. "svg" to enforce SVG output) :param kw: Any of the supported keywords by the specific serialization method.
def _macaroons_for_domain(cookies, domain):
    """Return any macaroons from the given cookie jar that apply to the
    given domain name."""
    # Build a throwaway request so the jar can attach matching cookies.
    url = 'https://{}/'.format(domain)
    request = urllib.request.Request(url)
    cookies.add_cookie_header(request)
    return httpbakery.extract_macaroons(request)
Return any macaroons from the given cookie jar that apply to the given domain name.
def cuts_outside(self):
    """Report whether the enzyme cuts outside its recognition site.

    Cutting at the very end of the site returns True.

    :returns: Whether the enzyme will cut outside its recognition site.
    :rtype: bool
    """
    site_length = len(self.recognition_site)
    return any(pos < 0 or pos > site_length + 1 for pos in self.cut_site)
Report whether the enzyme cuts outside its recognition site. Cutting at the very end of the site returns True. :returns: Whether the enzyme will cut outside its recognition site. :rtype: bool
def get_example(config, exam_lex):
    """Return the example description window, or an empty window when
    the 'examples' layout option is disabled."""
    show_examples = config.BOOLEAN_STATES[config.config.get('Layout', 'examples')]
    if not show_examples:
        return get_empty()
    return Window(
        content=BufferControl(
            buffer_name="examples",
            lexer=exam_lex))
example description window
def get_gene2aart(gene2section2gos, sec2chr):
    """Return a membership string per gene: one character per section.

    Each position holds the section's letter when the gene belongs to
    that section, otherwise '.'.
    """
    geneid2str = {}
    for geneid, gene_sections in gene2section2gos.items():
        geneid2str[geneid] = "".join(
            letter if section in gene_sections else "."
            for section, letter in sec2chr.items())
    return geneid2str
Return a string for each gene representing GO section membership.
def extended_sys_state_send(self, vtol_state, landed_state, force_mavlink1=False):
    """Encode and send an EXTENDED_SYS_STATE message.

    vtol_state   : The VTOL state if applicable (uint8_t)
    landed_state : The landed state (uint8_t)
    """
    message = self.extended_sys_state_encode(vtol_state, landed_state)
    return self.send(message, force_mavlink1=force_mavlink1)
Provides state for additional features vtol_state : The VTOL state if applicable. Is set to MAV_VTOL_STATE_UNDEFINED if UAV is not in VTOL configuration. (uint8_t) landed_state : The landed state. Is set to MAV_LANDED_STATE_UNDEFINED if landed state is unknown. (uint8_t)
def _verify(self, rj, token):
    """Verify a signed JSON Web Token.

    :param rj: A :py:class:`cryptojwt.jws.JWS` instance
    :param token: The signed JSON Web Token
    :return: A verified message
    """
    verify_keys = self.key_jar.get_jwt_verify_keys(rj.jwt)
    return rj.verify_compact(token, verify_keys)
Verify a signed JSON Web Token :param rj: A :py:class:`cryptojwt.jws.JWS` instance :param token: The signed JSON Web Token :return: A verified message
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
    """Trim trailing zeros from the real and imaginary parts of formatted
    complex-number strings.

    Splits each string on '+' into its real and imaginary parts, runs
    _trim_zeros_float on each, and rejoins them with '+' and a trailing
    'j'.

    NOTE(review): the split assumes the form "<real>+<imag>j"; a string
    with a negative imaginary part (e.g. "1.0-2.0j") would not split on
    '+'. Presumably callers only produce '+'-joined forms -- confirm.
    """
    def separate_and_trim(str_complex, na_rep):
        num_arr = str_complex.split('+')
        # num_arr[1][:-1] drops the trailing 'j' before trimming.
        return (_trim_zeros_float([num_arr[0]], na_rep) +
                ['+'] +
                _trim_zeros_float([num_arr[1][:-1]], na_rep) +
                ['j'])

    return [''.join(separate_and_trim(x, na_rep))
            for x in str_complexes]
Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those.