code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _types_match(type1, type2):
    """Return False only if no value of type1 can possibly match type2.

    Trailing '?' (optional-type) markers are ignored for the comparison.
    Supports only a limited selection of types; non-string type specs
    are conservatively considered matching.
    """
    if isinstance(type1, six.string_types) and isinstance(type2, six.string_types):
        return type1.rstrip('?') == type2.rstrip('?')
    return True
def append(self, data, segment=0):
    """Append a single row to an SFrame.

    Throws a RuntimeError if a column's type is incompatible with a type
    appended.

    Parameters
    ----------
    data : iterable
        An iterable representation of a single row; a non-iterable value
        is wrapped in a one-element list.
    segment : int
        The segment to write this row to. Segments are numbered
        sequentially from 0; any value in segment 1 comes after every
        value in segment 0, and row order within a segment is preserved.
    """
    row = data if hasattr(data, '__iter__') else [data]
    self._builder.append(row, segment)
def largest_connected_set(C, directed=True):
    r"""Largest connected component for a (directed) graph with edge
    weights given by the count matrix.

    Parameters
    ----------
    C : scipy.sparse matrix or dense array
        Count matrix specifying edge weights; dense input is converted
        to CSR first.
    directed : bool, optional
        Whether to compute connected components for a directed or
        undirected graph. Default is True.

    Returns
    -------
    lcc : array of integers
        The largest connected component of the graph.

    References
    ----------
    .. [1] Tarjan, R E. 1972. Depth-first search and linear graph
        algorithms. SIAM Journal on Computing 1 (2): 146-160.
    """
    matrix = csr_matrix(C) if isdense(C) else C
    return sparse.connectivity.largest_connected_set(matrix, directed=directed)
def open_in_composer(self):
    """Open in layout designer a given MapReport instance.

    .. versionadded: 4.3.0
    """
    impact_layer = self.impact_function.analysis_impacted
    report_path = dirname(impact_layer.source())
    impact_report = self.impact_function.impact_report
    custom_map_report_metadata = impact_report.metadata
    # Pick the final PDF map-report component from the report metadata.
    custom_map_report_product = (
        custom_map_report_metadata.component_by_tags(
            [final_product_tag, pdf_product_tag]))
    for template_path in self.retrieve_paths(
            custom_map_report_product,
            report_path=report_path,
            suffix='.qpt'):
        layout = QgsPrintLayout(QgsProject.instance())
        with open(template_path) as template_file:
            template_content = template_file.read()
        document = QtXml.QDomDocument()
        document.setContent(template_content)
        rwcontext = QgsReadWriteContext()
        load_status = layout.loadFromTemplate(document, rwcontext)
        # Abort entirely on the first template that fails to load.
        if not load_status:
            QtWidgets.QMessageBox.warning(
                self,
                tr('InaSAFE'),
                tr('Error loading template: %s') % template_path)
            return
        QgsProject.instance().layoutManager().addLayout(layout)
        self.iface.openLayoutDesigner(layout)
def _validation_error(prop, prop_type, prop_value, expected):
    """Raise a ValidationError for an updated property.

    When ``prop_type`` is None the property's *value* is reported as
    invalid; otherwise its *type* is reported.
    """
    if prop_type is None:
        attrib, assigned = 'value', prop_value
    else:
        attrib, assigned = 'type', prop_type
    raise ValidationError(
        'Invalid property {attrib} for {prop}:\n\t{attrib}: {assigned}\n\texpected: {expected}',
        attrib=attrib,
        prop=prop,
        assigned=assigned,
        expected=expected,
        invalid={prop: prop_value} if attrib == 'value' else {}
    )
def func(
    coroutine: Union[str, Function, Callable],
    *,
    name: Optional[str] = None,
    keep_result: Optional[SecondsTimedelta] = None,
    timeout: Optional[SecondsTimedelta] = None,
    max_tries: Optional[int] = None,
) -> Function:
    """Wrap a job function so more settings can be configured.

    :param coroutine: coroutine function to call; may be an import string
    :param name: name for the function; defaults to ``coroutine.__qualname__``
    :param keep_result: duration to keep the result for; 0 means not kept
    :param timeout: maximum time the job should take
    :param max_tries: maximum number of tries; use 1 to prevent retrying
    """
    if isinstance(coroutine, Function):
        return coroutine
    if isinstance(coroutine, str):
        name = name or coroutine
        coroutine = import_string(coroutine)
    assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
    return Function(
        name or coroutine.__qualname__,
        coroutine,
        to_seconds(timeout),
        to_seconds(keep_result),
        max_tries,
    )
def get_ga_client_id(self):
    """Return the Google Analytics client id for the current request.

    Reads the id from the ``_ga`` cookie if available (otherwise a fresh
    UUID) and caches it in the session. Falls back to the parent
    implementation when no request/session is available.
    """
    request = self.get_ga_request()
    if not request or not hasattr(request, 'session'):
        return super(GARequestErrorReportingMixin, self).get_ga_client_id()
    if 'ga_client_id' not in request.session:
        match = self.ga_cookie_re.match(request.COOKIES.get('_ga', ''))
        cid = (match and match.group('cid')) or str(uuid.uuid4())
        request.session['ga_client_id'] = cid
    return request.session['ga_client_id']
def _read_para_seq_data(self, code, cbit, clen, *, desc, length, version): if clen != 4: raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format') _seqn = self._read_unpack(4) seq_data = dict( type=desc, critical=cbit, length=clen, seq=_seqn, ) return seq_data
Read HIP SEQ_DATA parameter. Structure of HIP SEQ_DATA parameter [RFC 6078]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Sequence number | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 seq_data.type Parameter Type 1 15 seq_data.critical Critical Bit 2 16 seq_data.length Length of Contents 4 32 seq_data.seq Sequence number
def cmd_karma_bulk(infile, jsonout, badonly, verbose):
    """Show which IP addresses are inside blacklists using the Karma
    online service.

    Reads newline-separated IPs from ``infile``; prints JSON when
    ``jsonout`` is set, otherwise tab-separated text. With ``badonly``
    clean addresses are omitted.
    """
    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
    result = {}
    for ip in infile.read().split('\n'):
        if not ip:
            continue
        logging.info('Checking ' + ip)
        response = karma(ip)
        if response:
            result[ip] = response
        elif not badonly:
            result[ip] = ['CLEAN']
    if jsonout:
        print(json.dumps(result, indent=4))
    else:
        for addr, lists in result.items():
            print(addr, '\t', ','.join(lists))
def get_space_id(deployment_name, space_name, token_manager=None, app_url=defaults.APP_URL):
    """Return the id of the space matching ``space_name`` in a deployment.

    Raises JutException when no space with that name exists.
    """
    for space in get_spaces(deployment_name,
                            token_manager=token_manager,
                            app_url=app_url):
        if space['name'] == space_name:
            return space['id']
    raise JutException('Unable to find space "%s" within deployment "%s"' %
                       (space_name, deployment_name))
def oauth_access(
    self, *, client_id: str, client_secret: str, code: str, **kwargs
) -> SlackResponse:
    """Exchange a temporary OAuth verifier code for an access token.

    Args:
        client_id (str): Issued when you created your application.
        client_secret (str): Issued when you created your application.
        code (str): The code param returned via the OAuth callback.
    """
    payload = dict(kwargs,
                   client_id=client_id,
                   client_secret=client_secret,
                   code=code)
    return self.api_call("oauth.access", data=payload)
def delete(self, indexes):
    """Delete rows from the DataFrame.

    :param indexes: a single index value, a list of index values, or a
        boolean mask (one entry per existing row) selecting rows to drop
    :return: nothing
    """
    if not isinstance(indexes, (list, blist)):
        indexes = [indexes]
    if all(isinstance(entry, bool) for entry in indexes):
        if len(indexes) != len(self._index):
            raise ValueError('boolean indexes list must be same size of existing indexes')
        positions = [pos for pos, flag in enumerate(indexes) if flag]
    elif self._sort:
        positions = [sorted_index(self._index, value) for value in indexes]
    else:
        positions = [self._index.index(value) for value in indexes]
    # Delete from the highest position down so earlier ones stay valid.
    positions = sorted(positions, reverse=True)
    for pos in positions:
        del self._data[pos]
    for pos in positions:
        del self._index[pos]
def linear_deform(template, displacement, out=None):
    """Linearized deformation of a template with a displacement field.

    Maps a template ``I`` and displacement field ``v`` to the function
    ``x --> I(x + v(x))``.

    Parameters
    ----------
    template : `DiscreteLpElement`
        Template to be deformed by a displacement field.
    displacement : element of power space of ``template.space``
        Vector field (displacement field) used to deform the template.
    out : `numpy.ndarray`, optional
        Array for the function values of the deformed template; must
        match ``template`` in shape and have a compatible dtype.

    Returns
    -------
    deformed_template : `numpy.ndarray`
        Function values of the deformed template; a reference to ``out``
        when it was given.
    """
    sample_points = template.space.points()
    for axis, component in enumerate(displacement):
        sample_points[:, axis] += component.asarray().ravel()
    values = template.interpolation(sample_points.T, out=out, bounds_check=False)
    return values.reshape(template.space.shape)
def set_lacp_timeout(self, name, value=None):
    """Configure the Port-Channel LACP fallback timeout.

    The fallback timeout is the period an interface in fallback mode
    remains in LACP mode without receiving a PDU.

    Args:
        name(str): The Port-Channel interface name
        value(int): fallback timeout in seconds

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    base = 'port-channel lacp fallback timeout'
    commands = ['interface %s' % name,
                self.command_builder(base, value=value)]
    return self.configure(commands)
def random_gate(qubits: Union[int, Qubits]) -> Gate:
    r"""Return a random unitary gate on N qubits.

    Ref: "How to generate random matrices from the classical compact
    groups", Francesco Mezzadri, math-ph/0609050
    """
    N, qubits = qubits_count_tuple(qubits)
    unitary_matrix = scipy.stats.unitary_group.rvs(2 ** N)
    return Gate(unitary_matrix, qubits=qubits, name='RAND{}'.format(N))
def from_handle(fh, stream_default='fasta'):
    """Look up the BioPython file type for a file-like object.

    For stdin, stdout and stderr, ``stream_default`` is returned; other
    handles are resolved from their ``.name`` attribute.
    """
    if fh in (sys.stdin, sys.stdout, sys.stderr):
        return stream_default
    return from_filename(fh.name)
def _rshift_arithmetic(self, shift_amount):
    """Arithmetic shift right with a concrete shift amount.

    :param int shift_amount: Number of bits to shift right.
    :return: The new StridedInterval after right shifting
    :rtype: StridedInterval
    """
    if self.is_empty:
        return self
    # Split the interval so each piece lies entirely in one sign half.
    nsplit = self._nsplit()
    if len(nsplit) == 1:
        # NOTE(review): presumably true when every value in this piece has
        # the sign bit set (bounds above the signed maximum) — confirm
        # against _nsplit's contract.
        highest_bit_set = self.lower_bound > StridedInterval.signed_max_int(nsplit[0].bits)
        l = self.lower_bound >> shift_amount
        u = self.upper_bound >> shift_amount
        # Stride shrinks with the shift but never below 1 here; corrected
        # to 0 below when the result collapses to a single value.
        stride = max(self.stride >> shift_amount, 1)
        # Mask covering the top shift_amount bits, used for sign extension.
        mask = ((2 ** shift_amount - 1) << (self.bits - shift_amount))
        if highest_bit_set:
            l = l | mask
            u = u | mask
        if l == u:
            stride = 0
        return StridedInterval(bits=self.bits,
                               lower_bound=l,
                               upper_bound=u,
                               stride=stride,
                               uninitialized=self.uninitialized
                               )
    else:
        # Shift each sign half separately and union the results.
        a = nsplit[0]._rshift_arithmetic(shift_amount)
        b = nsplit[1]._rshift_arithmetic(shift_amount)
        return a.union(b)
def _apply(self, func, name, window=None, center=None, check_minp=None,
           **kwargs):
    """
    Dispatch to apply; we are stripping all of the _apply kwargs
    (window, center, check_minp and ``func`` are unused here) and
    performing the original function call on the grouped object.
    """
    def f(x, name=name, *args):
        # ``name`` is bound as a default argument so every per-group call
        # targets the same method/function.
        x = self._shallow_copy(x)
        if isinstance(name, str):
            # A string name dispatches to the method of that name.
            return getattr(x, name)(*args, **kwargs)
        # Otherwise treat ``name`` as a callable for .apply().
        return x.apply(name, *args, **kwargs)
    return self._groupby.apply(f)
def highlight_min(self, subset=None, color='yellow', axis=0):
    """Highlight the minimum by shading the background.

    Parameters
    ----------
    subset : IndexSlice, default None
        a valid slice for ``data`` to limit the style application to.
    color : str, default 'yellow'
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        apply per column (0/'index'), per row (1/'columns'), or to the
        entire DataFrame at once (None).

    Returns
    -------
    self : Styler
    """
    return self._highlight_handler(subset=subset,
                                   color=color,
                                   axis=axis,
                                   max_=False)
def find_worktree_git_dir(dotgit):
    """Search for a gitdir for this worktree.

    ``dotgit`` is the path of a worktree's ``.git`` file — a regular file
    containing a ``gitdir: <path>`` line.

    :return: the gitdir path, or None when ``dotgit`` is missing, is not
        a regular file, or contains a malformed line.
    """
    try:
        statbuf = os.stat(dotgit)
    except OSError:
        return None
    if not stat.S_ISREG(statbuf.st_mode):
        return None
    try:
        # Use a context manager so the file handle is closed promptly
        # (the original leaked the open file object).
        with open(dotgit, 'r') as fp:
            lines = fp.readlines()
        for key, value in [line.strip().split(': ') for line in lines]:
            if key == 'gitdir':
                return value
    except ValueError:
        # A line without exactly one ': ' separator fails to unpack.
        pass
    return None
def process_byte(self, tag):
    """Process a byte-type tag.

    Stamps the tag with the normal register's current address, then
    advances the register by one byte.
    """
    register = self.normal_register
    tag.set_address(register.current_address)
    register.move_to_next_address(1)
def _global_step(hparams):
    """Adjust the global step when a multi-step optimizer is used.

    Returns the (float) global step divided by the accumulation factor
    when one is configured, otherwise the step unchanged.
    """
    step = tf.to_float(tf.train.get_or_create_global_step())
    multiplier = hparams.optimizer_multistep_accumulate_steps
    if not multiplier:
        return step
    tf.logging.info("Dividing global step by %d for multi-step optimizer."
                    % multiplier)
    return step / tf.to_float(multiplier)
def get_flattened_bsp_keys_from_schema(schema):
    """Return the flattened keys of BoundSpatialPoints in a schema.

    :param schema: schema
    :return: list of ``"<field>.position"`` keys
    """
    keys = []
    for key, field in schema.declared_fields.items():
        if (isinstance(field, mm.fields.Nested)
                and isinstance(field.schema, BoundSpatialPoint)):
            keys.append("{}.{}".format(key, "position"))
    return keys
def shear(cls, x_angle=0, y_angle=0):
    """Create a shear transform along one or both axes.

    :param x_angle: Angle in degrees to shear along the x-axis.
    :type x_angle: float
    :param y_angle: Angle in degrees to shear along the y-axis.
    :type y_angle: float
    :rtype: Affine
    """
    shear_x = math.tan(math.radians(x_angle))
    shear_y = math.tan(math.radians(y_angle))
    members = (1.0, shear_y, 0.0,
               shear_x, 1.0, 0.0,
               0.0, 0.0, 1.0)
    return tuple.__new__(cls, members)
def _complete_multipart_upload(self, bucket_name, object_name, upload_id, uploaded_parts):
    """Complete an active multipart upload request.

    :param bucket_name: Bucket name of the multipart request.
    :param object_name: Object name of the multipart request.
    :param upload_id: Upload id of the active multipart request.
    :param uploaded_parts: dict mapping part number to uploaded part.
    """
    is_valid_bucket_name(bucket_name)
    is_non_empty_string(object_name)
    is_non_empty_string(upload_id)
    # Parts must be marshalled in ascending part-number order.
    ordered_parts = [uploaded_parts[number]
                     for number in sorted(uploaded_parts.keys())]
    data = xml_marshal_complete_multipart_upload(ordered_parts)
    headers = {
        'Content-Length': len(data),
        'Content-Type': 'application/xml',
        'Content-Md5': get_md5_base64digest(data),
    }
    response = self._url_open('POST',
                              bucket_name=bucket_name,
                              object_name=object_name,
                              query={'uploadId': upload_id},
                              headers=headers,
                              body=data,
                              content_sha256=get_sha256_hexdigest(data))
    return parse_multipart_upload_result(response.data)
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced
    similarity graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster labels for each sample from the CSPA consensus heuristic.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme
    for Partitioning Irregular Graphs", SIAM J. Sci. Comput. 20(1),
    359-392, 1999.
    """
    import os

    graph_file = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, graph_file)
    # Remove the temporary graph file in-process instead of shelling out
    # to 'rm' (portable and avoids a subprocess).
    try:
        os.remove(graph_file)
    except OSError:
        # Match the best-effort behaviour of subprocess.call(['rm', ...]),
        # which ignored failures.
        pass
    return labels
def camelize(word):
    """Convert a word from lower_with_underscores to CamelCase.

    Args:
        word: The string to convert.

    Returns:
        The modified string.
    """
    segments = re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' ')
    # Skip empty segments so leading/trailing separators (e.g. '_foo')
    # no longer raise IndexError on segment[0].
    return ''.join(segment[0].upper() + segment[1:]
                   for segment in segments if segment)
def channelize(gen, channels):
    """Break a multi-channel generator into one sub-generator per channel.

    Takes a generator producing n-tuples of samples and returns n
    generators, each producing samples for a single channel. Use this to
    process synchronized streams for output, since the sampler functions
    only take tuples of generators.
    """
    def _select(source, channel):
        # Yield only this channel's sample from each frame.
        for frame in source:
            yield frame[channel]

    copies = itertools.tee(gen, channels)
    return [_select(copy, channel) for channel, copy in enumerate(copies)]
def fbank(wav_path, flat=True):
    """Grab log Mel filterbank, deltas and double deltas; save to disk.

    Writes the features next to the wav as ``<stem>fbank.npy``; nothing
    is returned.
    """
    rate, sig = wav.read(wav_path)
    if len(sig) == 0:
        logger.warning("Empty wav: {}".format(wav_path))
    fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
    energy = extract_energy(rate, sig)
    feat = np.hstack([energy, fbank_feat])
    delta_feat = python_speech_features.delta(feat, 2)
    delta_delta_feat = python_speech_features.delta(delta_feat, 2)
    all_feats = [feat, delta_feat, delta_delta_feat]
    if flat:
        all_feats = np.concatenate(all_feats, axis=1)
    else:
        # (num_feat_sets, time, freq) -> (time, freq, num_feat_sets)
        all_feats = np.array(all_feats)
        all_feats = np.swapaxes(all_feats, 0, 1)
        all_feats = np.swapaxes(all_feats, 1, 2)
    np.save(wav_path[:-3] + "fbank.npy", all_feats)
def section_term_branch_orders(neurites, neurite_type=NeuriteType.all):
    """Termination section branch orders in a collection of neurites."""
    return map_sections(sectionfunc.branch_order,
                        neurites,
                        neurite_type=neurite_type,
                        iterator_type=Tree.ileaf)
def _netstat_route_netbsd():
    """Return netstat routing information for NetBSD.

    Runs ``netstat -f <family> -rn`` for both address families; parsing
    is identical for IPv4 and IPv6, so one loop handles both (the
    original duplicated the whole body per family).
    """
    ret = []
    for family in ('inet', 'inet6'):
        cmd = 'netstat -f {0} -rn | tail -n+5'.format(family)
        out = __salt__['cmd.run'](cmd, python_shell=True)
        for line in out.splitlines():
            comps = line.split()
            ret.append({
                'addr_family': family,
                'destination': comps[0],
                'gateway': comps[1],
                'netmask': '',
                'flags': comps[3],
                'interface': comps[6]})
    return ret
async def create_lease_store_if_not_exists_async(self):
    """Create the lease store container if it does not exist.

    :return: True when the create call completes (an existing container
        is treated as success by the storage client); errors are logged
        and re-raised.
    :rtype: bool
    """
    create_container = functools.partial(
        self.storage_client.create_container, self.lease_container_name)
    try:
        await self.host.loop.run_in_executor(self.executor, create_container)
    except Exception as err:
        _logger.error("%r", err)
        raise err
    return True
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is valid when it is one to four octets (0-255) separated
    by periods, a forward slash, and a subnet bitmask in dotted-quad
    format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0.0.0')
    False

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if valid, ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # maxsplit=1 (the original used 2): a second '/' now stays in
            # the mask part and fails netmask validation, instead of
            # raising ValueError while unpacking.
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        return False
    raise TypeError("expected string or unicode")
def expand(doc, doc_url="param://", params=None):
    """Expand a document already pulled from ``doc_url``.

    Use ``mo_json_config.expand({})`` to assume the current working
    directory.

    :param doc: the data structure from the JSON source
    :param doc_url: the URL this doc came from (the default uses
        ``params`` as a document source)
    :param params: extra parameters not found in the doc_url parameters
        (supersede parameters from doc_url)
    :return: expanded JSON-serializable structure
    """
    if doc_url.find("://") == -1:
        Log.error("{{url}} must have a prototcol (eg http://) declared", url=doc_url)
    url = URL(doc_url)
    url.query = set_default(url.query, params)
    resolved = _replace_ref(doc, url)
    localized = _replace_locals(resolved, [resolved])
    return wrap(localized)
def addRectAnnot(self, rect):
    """Add a 'Rectangle' annotation to this page."""
    CheckParent(self)
    annot = _fitz.Page_addRectAnnot(self, rect)
    if not annot:
        return
    annot.thisown = True
    # Hold only a weak reference back to the page to avoid a cycle.
    annot.parent = weakref.proxy(self)
    self._annot_refs[id(annot)] = annot
    return annot
def start_router(router_class, router_name):
    """Start a router actor and register it under ``router_name``.

    Args:
        router_class: The router class to instantiate.
        router_name: The name to give to the router.

    Returns:
        A handle to the newly started router actor.
    """
    actor_handle = router_class.remote(router_name)
    ray.experimental.register_actor(router_name, actor_handle)
    actor_handle.start.remote()
    return actor_handle
def _to_event_data(obj): if obj is None: return None if isinstance(obj, bool): return obj if isinstance(obj, int): return obj if isinstance(obj, float): return obj if isinstance(obj, str): return obj if isinstance(obj, bytes): return obj if isinstance(obj, dict): return obj if isinstance(obj, NodeDriver): return obj.name if isinstance(obj, list): return [_to_event_data(item) for item in obj] event_data = {} for attribute_name in dir(obj): if attribute_name.startswith('_'): continue attribute_value = getattr(obj, attribute_name) if callable(attribute_value): continue event_data[attribute_name] = _to_event_data(attribute_value) return event_data
Convert the specified object into a form that can be serialised by msgpack as event data. :param obj: The object to convert.
def load_stock_quantity(self):
    """Load quantities for all stocks in the model, then close the book."""
    info = StocksInfo(self.config)
    for stock in self.model.stocks:
        stock.quantity = info.load_stock_quantity(stock.symbol)
    info.gc_book.close()
def mri_knee_data_8_channel():
    """Raw data for 8 channel MRI of a knee.

    An SE measurement of a healthy volunteer's knee, rescaled so that
    the reconstruction fits approximately in [0, 1]. See DOI
    `10.5281/zenodo.800529` for further information.

    See Also
    --------
    mri_knee_inverse_8_channel
    """
    url = 'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat'
    dct = get_data('3_rawdata_knee_8ch.mat', subset=DATA_SUBSET, url=url)
    # Move the channel axis first, rescale, and flip to the expected
    # orientation.
    rawdata = np.swapaxes(dct['rawdata'], 0, -1) * 9e3
    return flip(rawdata, 2)
def insert(self, part):
    """Insert a part into this assembly.

    Args:
        - part (onshapepy.part.Part): the Part instance to insert.

    Returns:
        - requests.Response: Onshape response data
    """
    params = {key: str(value) for key, value in part.params.items()}
    return c.create_assembly_instance(self.uri.as_dict(),
                                      part.uri.as_dict(),
                                      params)
def get_days_in_month(year: int, month: int) -> int:
    """Return the number of days in the given month.

    Arguments are 1-based, i.e. November = 11.
    """
    _, last_day = calendar.monthrange(year, month)
    return last_day
def _control_longitude(self): if self.lonm < 0.0: self.lonm = 360.0 + self.lonm if self.lonM < 0.0: self.lonM = 360.0 + self.lonM if self.lonm > 360.0: self.lonm = self.lonm - 360.0 if self.lonM > 360.0: self.lonM = self.lonM - 360.0
Control on longitude values
def _handle_aleph_keyword_view(dataset):
    """Translate the Aleph keywords to locally used data.

    Feeds every ``keyword_tags`` value to the keyword handler and then
    strips the key from ``dataset``.
    """
    add_keyword = ViewController.aleph_kw_handler.add_keyword
    for keyword in dataset.get("keyword_tags", []):
        add_keyword(keyword["val"])
    dataset.pop("keyword_tags", None)
def _remove_white_background(image):
    """Remove white background in the preview image.

    For RGBA images, un-matte the white background from the RGB bands
    using the alpha channel; other modes are returned unchanged.
    """
    from PIL import ImageMath, Image
    if image.mode != "RGBA":
        return image
    bands = image.split()
    alpha = bands[3]
    expression = (
        'convert('
        'float(x + a - 255) * 255.0 / float(max(a, 1)) * '
        'float(min(a, 1)) + float(x) * float(1 - min(a, 1))'
        ', "L")'
    )
    rgb = [ImageMath.eval(expression, x=band, a=alpha) for band in bands[:3]]
    return Image.merge(bands=rgb + [alpha], mode="RGBA")
def global_request(self, kind, data=None, wait=True):
    """Make a global request to the remote host.

    These are normally extensions to the SSH2 protocol.

    :param str kind: name of the request.
    :param tuple data: optional additional data to attach to the request.
    :param bool wait: True to block until a response is received.
    :return: a `.Message` with possible additional data if the request
        succeeded (or an empty `.Message` if ``wait`` was False); None if
        the request was denied or the transport went inactive.
    """
    if wait:
        self.completion_event = threading.Event()
    msg = Message()
    msg.add_byte(cMSG_GLOBAL_REQUEST)
    msg.add_string(kind)
    msg.add_boolean(wait)
    if data is not None:
        msg.add(*data)
    self._log(DEBUG, 'Sending global request "%s"' % kind)
    self._send_user_message(msg)
    if not wait:
        return None
    # Poll so a dying transport is noticed while waiting for the reply.
    while True:
        self.completion_event.wait(0.1)
        if not self.active:
            return None
        if self.completion_event.isSet():
            break
    return self.global_response
def init_app(self, app, config_prefix=None):
    """Read JIRA settings from app configuration and initialize the JIRA
    instance.

    Positional arguments:
    app -- Flask application instance.

    Keyword arguments:
    config_prefix -- Prefix used in config key names in the Flask app's
        configuration. Useful for applications which maintain two
        authenticated sessions with a JIRA server. Default is 'JIRA'.
        Will be converted to upper case.
    """
    # Restore the real kill_session implementation before (re)connecting.
    self.kill_session = self.original_kill_session
    config_prefix = (config_prefix or 'JIRA').rstrip('_').upper()
    if not hasattr(app, 'extensions'):
        app.extensions = dict()
    if config_prefix.lower() in app.extensions:
        raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
    app.extensions[config_prefix.lower()] = _JIRAState(self, app)
    args = read_config(app.config, config_prefix)
    try:
        super(JIRA, self).__init__(**args)
    except ConnectionError:
        # Optionally tolerate an unreachable JIRA server at init time.
        if not app.config.get('{0}_IGNORE_INITIAL_CONNECTION_FAILURE'.format(config_prefix)):
            raise
        LOG.exception('Ignoring ConnectionError.')
def beam(problem, beam_size=100, iterations_limit=0, viewer=None):
    """Beam search.

    ``beam_size`` is the size of the beam. If ``iterations_limit`` is
    specified, the algorithm ends after that many iterations; otherwise
    it continues until no better node than the current one can be found.

    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.value, and SearchProblem.generate_random_state.
    """
    stop_when_stuck = iterations_limit == 0
    return _local_search(problem,
                         _all_expander,
                         iterations_limit=iterations_limit,
                         fringe_size=beam_size,
                         random_initial_states=True,
                         stop_when_no_better=stop_when_stuck,
                         viewer=viewer)
def open(self, section_index=0): uri = self._sections[section_index][1] if len(uri.split()) == 1: self._open_url(uri) else: if self._verbose: print "running command: %s" % uri p = popen(uri, shell=True) p.wait()
Launch a help section.
def on_chord_part_return(self, task, state, result, propagate=False):
    u"""Update the linking ChordData object and execute callback if needed.

    Parameters
    ----------
    task: the subtask that just finished executing. Most useful values
        are stored on task.request.
    state: the status of the just-finished subtask.
    result: the resulting value of subtask execution.
    propagate: unused here; CELERY_CHORD_PROPAGATES and the chord's
        options are checked in chord_data.execute_callback()
    """
    with transaction.atomic():
        # Row-lock the chord so concurrent part-returns serialize.
        chord_data = ChordData.objects.select_for_update().get(
            callback_result__task_id=task.request.chord[u'options'][u'task_id']
        )
        # Record this subtask's outcome.
        _ = TaskMeta.objects.update_or_create(
            task_id=task.request.id,
            defaults={
                u'status': state,
                u'result': result
            }
        )
        if chord_data.is_ready():
            # All parts done: schedule backend cleanup, then fire callback.
            self.get_suitable_app(current_app).tasks[u'celery.backend_cleanup'].apply_async()
            chord_data.execute_callback()
def set_switch_state(self, state):
    """Set the switch state on the device and update the local cache."""
    self.set_service_value(self.switch_service,
                           'Target',
                           'newTargetValue',
                           state)
    self.set_cache_value('Status', state)
def norm_squared(x, Mx=None, inner_product=ip_euclid):
    """Compute the norm^2 w.r.t. to a given scalar product.

    Raises InnerProductError when the 1x1 product <x, Mx> has a
    significant imaginary part or a negative real part (indefinite
    inner product).
    """
    assert len(x.shape) == 2
    if Mx is None:
        rho = inner_product(x, x)
    else:
        assert len(Mx.shape) == 2
        rho = inner_product(x, Mx)
    if rho.shape == (1, 1):
        value = rho[0, 0]
        if abs(value.imag) > abs(value) * 1e-10 or value.real < 0.0:
            raise InnerProductError(('<x,Mx> = %g. Is the inner product '
                                     'indefinite?') % value)
    return numpy.linalg.norm(rho, 2)
def clone_repo(pkg, dest, repo, repo_dest, branch):
    """Clone the Playdoh repo into a custom path.

    ``pkg`` and ``dest`` are accepted for interface compatibility but
    are not used by the clone itself.
    """
    git(['clone', '--recursive', '-b', branch, repo, repo_dest])
def apply_adaptation(self, target_illuminant, adaptation='bradford'):
    """Apply an adaptation matrix to change the XYZ color's illuminant.

    You'll most likely only need this during RGB conversions.
    """
    # Raw strings: '\-' / '\*' are invalid escape sequences and raise a
    # DeprecationWarning on modern Pythons; the rendered text is
    # byte-identical since the backslash was already kept literally.
    logger.debug(r" \- Original illuminant: %s", self.illuminant)
    logger.debug(r" \- Target illuminant: %s", target_illuminant)
    if self.illuminant != target_illuminant:
        logger.debug(r" \* Applying transformation from %s to %s ",
                     self.illuminant, target_illuminant)
        apply_chromatic_adaptation_on_color(
            color=self,
            targ_illum=target_illuminant,
            adaptation=adaptation)
def combination(n, r):
    """Calculate nCr (the binomial coefficient).

    The original Pascal's-triangle double recursion was O(2^n) and
    recursed without bound for r > n; the multiplicative formula below
    is O(r) and returns identical values for 0 <= r <= n. Out-of-range r
    yields 0 (the standard nCr convention).
    """
    if r < 0 or r > n:
        return 0
    # Symmetry: nCr == nC(n-r); use the smaller side for fewer steps.
    r = min(r, n - r)
    result = 1
    for i in range(1, r + 1):
        # Each step multiplies by an integer-divisible factor, so the
        # floor division is exact.
        result = result * (n - r + i) // i
    return result
def get(self):
    """Return the list of all ansible runs, formatted for the API."""
    LOG.info('Returning all ansible runs')
    return [run_model.format_response(run)
            for run in self.backend_store.list_runs()]
def indexables(self):
    """Create (and cache) the indexables from the table description."""
    if self._indexables is None:
        desc = self.description
        # One index column followed by one data column per described name.
        self._indexables = [GenericIndexCol(name='index', axis=0)]
        self._indexables.extend(
            GenericDataIndexableCol(name=n,
                                    pos=i,
                                    values=[n],
                                    version=self.version)
            for i, n in enumerate(desc._v_names))
    return self._indexables
def copystat(self, target):
    """Copy the permissions, times and flags from this path to `target`.

    The owner is not copied.
    """
    shutil.copystat(self.path, self._to_backend(target))
def connect(*cmds, **kwargs):
    """Connect multiple command streams together and yield the final stream.

    Each element of ``cmds`` is handed to ``stream``; the output of one
    command becomes the stdin of the next, like a shell pipeline.

    Keyword Args:
        stdin: file-like object used as the first command's stdin.
        env (dict): environment for the commands (defaults to os.environ).
        timeout (int): seconds to give the whole pipeline to complete.

    Yields:
        The output stream of the last command in the pipeline.
    """
    stdin = kwargs.get("stdin")
    env = kwargs.get("env", os.environ)
    timeout = kwargs.get("timeout")
    end = len(cmds) - 1

    @contextmanager
    def inner(idx, inp):
        # Recursively open each command's stream, feeding the previous
        # stream in as stdin, so every context manager stays open until
        # the caller is finished with the final stream.
        with stream(cmds[idx], stdin=inp, env=env, timeout=timeout) as s:
            if idx == end:
                yield s
            else:
                with inner(idx + 1, s) as c:
                    yield c

    with inner(0, stdin) as s:
        yield s
Connects multiple command streams together and yields the final stream. Args: cmds (list): list of commands to pipe together. Each command will be an input to ``stream``. stdin (file like object): stream to use as the first command's standard input. env (dict): The environment in which to execute the commands. PATH should be defined. timeout (int): Amount of time in seconds to give the pipeline to complete. The ``timeout`` utility must be installed to use this feature. Yields: The output stream for the final command in the pipeline. It should typically be wrapped in a ``reader``.
def find_group_consistencies(groups1, groups2):
    r"""Return the groups that appear in both groupings.

    Each group is canonicalised as a sorted tuple, so element order inside
    a group (and the order of the groups themselves) is irrelevant.

    Returns:
        list: sorted-tuple groups present in both ``groups1`` and ``groups2``
    """
    canonical1 = {tuple(sorted(group)) for group in groups1}
    canonical2 = {tuple(sorted(group)) for group in groups2}
    return list(canonical1 & canonical2)
r""" Returns a measure of group consistency Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> groups1 = [[1, 2, 3], [4], [5, 6]] >>> groups2 = [[1, 2], [4], [5, 6]] >>> common_groups = find_group_consistencies(groups1, groups2) >>> result = ('common_groups = %r' % (common_groups,)) >>> print(result) common_groups = [(5, 6), (4,)]
def multiply(self, number):
    """Return a new Vector: this vector scaled component-wise by ``number``."""
    scaled = [number * component for component in self.to_list()]
    return self.from_list(scaled)
Return a Vector as the product of the vector and a real number.
def calc_log_size(request, calc_id):
    """Get the current number of lines in the log for ``calc_id``.

    Returns the size as a JSON HttpResponse, or a 404 when the
    calculation is unknown to the database.
    """
    try:
        response_data = logs.dbcmd('get_log_size', calc_id)
    except dbapi.NotFound:
        return HttpResponseNotFound()
    return HttpResponse(content=json.dumps(response_data), content_type=JSON)
Get the current number of lines in the log
def check_output(self, cmd, timeout=None, keep_rc=False, env=None):
    """Run ``cmd`` through the subproc helper and return its output.

    Subclasses can override to provide special environment setup, command
    prefixes, etc.  Falls back to ``self.timeout`` when no explicit
    ``timeout`` is given.
    """
    return subproc.call(cmd, timeout=timeout or self.timeout, keep_rc=keep_rc, env=env)
Subclasses can override to provide special environment setup, command prefixes, etc.
def output_file(self, _container):
    """Find and write the output path of a chroot container.

    If the path already exists, prompts before overwriting and aborts
    (exit code 0) when the user declines.  The chosen path is stored in
    ``CFG["container"]["output"]``.
    """
    p = local.path(_container)
    if p.exists():
        if not ui.ask("Path '{0}' already exists."
                      " Overwrite?".format(p)):
            sys.exit(0)
    CFG["container"]["output"] = str(p)
Find and writes the output path of a chroot container.
def clear_and_configure(config=None, bind_in_runtime=True):
    """Clear an existing injector and create another one with a callable config.

    Both steps run under the injector lock so concurrent callers never
    observe a half-configured injector.
    """
    with _INJECTOR_LOCK:
        clear()
        return configure(config, bind_in_runtime=bind_in_runtime)
Clear an existing injector and create another one with a callable config.
def validate_value(self, value):
    """Validate ``value`` via the underlying form field.

    Coerces with ``to_python``, then runs the field's own validation and
    its validators, letting any validation errors propagate.  Returns the
    coerced value.
    """
    field = self.instance.preference.setup_field()
    value = field.to_python(value)
    field.validate(value)
    field.run_validators(value)
    return value
We call validation from the underlying form field
def mod_issquare(a, p):
    """Return whether ``a`` is a square modulo ``p``.

    Uses Euler's criterion, a**((p-1)//2) == 1 (mod p); zero is treated
    as a square.  (The criterion is meaningful for odd prime ``p``.)
    """
    if not a:
        return True
    exponent = p >> 1  # == (p - 1) // 2 for odd p
    return pow(a, exponent, p) == 1
Returns whether `a` is a square modulo `p`.
def apply(self, doc):
    """Generate MentionSentences from a Document by parsing all of its Sentences.

    Yields one ``TemporarySpanMention`` spanning each entire sentence
    (character offsets 0 to len(text) - 1).

    :param doc: The ``Document`` to parse.
    :type doc: ``Document``
    :raises TypeError: If the input doc is not of type ``Document``.
    """
    if not isinstance(doc, Document):
        raise TypeError(
            "Input Contexts to MentionSentences.apply() must be of type Document"
        )
    for sentence in doc.sentences:
        yield TemporarySpanMention(
            char_start=0, char_end=len(sentence.text) - 1, sentence=sentence
        )
Generate MentionSentences from a Document by parsing all of its Sentences. :param doc: The ``Document`` to parse. :type doc: ``Document`` :raises TypeError: If the input doc is not of type ``Document``.
def msg_intro(self):
    """Return the introductory message, centered for the terminal width.

    The format template is filled with the styled delimiter before
    trailing whitespace is stripped and the text is centered.
    """
    delim = self.style.attr_minor(self.style.delimiter)
    txt = self.intro_msg_fmt.format(delim=delim).rstrip()
    return self.term.center(txt)
Introductory message displayed above the heading.
def copy_script(self, filename, id_=-1):
    """Copy a script to all child repositories.

    Each child DistributionPoint handles its own migration status; see
    the individual DistributionPoint types for details.

    Args:
        filename: String path to the local file to copy.
        id_: Integer ID to associate the script with (JDS or CDP only).
            Default is -1, which creates a new script object in the
            database.
    """
    for repo in self._children:
        repo.copy_script(filename, id_)
Copy a script to all repositories. Takes into account whether a JSS has been migrated. See the individual DistributionPoint types for more information. Args: filename: String path to the local file to copy. id_: Integer ID you wish to associate script with for a JDS or CDP only. Default is -1, which is used for creating a new script object in the database.
def simple_moving_matrix(x, n=10):
    """Create a matrix of consecutive sliding windows over ``x``.

    Fixes a Python 3 bug: ``n / 2`` yields a float, which ``range`` and
    slicing reject; integer division is required.

    Parameters
    ----------
    x : ndarray
        Input array.  Multi-column input is averaged across columns first.
    n : integer
        The number of sample points used to make the average.

    Returns
    -------
    ndarray
        2-D array whose rows are consecutive length-``n`` windows of ``x``,
        useful for confidence intervals of a simple moving average.
    """
    if x.ndim > 1 and len(x[0]) > 1:
        x = np.average(x, axis=1)
    h = n // 2  # half window; was `n / 2`, a float on Python 3
    o = 0 if h * 2 == n else 1  # extra trailing element for odd n
    windows = []
    for i in range(h, len(x) - h):
        windows.append(x[i - h:i + h + o])
    return np.array(windows)
Create simple moving matrix. Parameters ---------- x : ndarray A numpy array n : integer The number of sample points used to make average Returns ------- ndarray A n x n numpy array which will be useful for calculating confidentail interval of simple moving average
def state_range_type(self) -> Sequence[str]:
    """The range type of each state fluent in canonical order.

    Returns:
        Sequence[str]: A tuple of range types, one per state fluent,
        ordered by the domain's state fluent ordering.
    """
    fluents = self.domain.state_fluents
    ordering = self.domain.state_fluent_ordering
    return self._fluent_range_type(fluents, ordering)
The range type of each state fluent in canonical order. Returns: Sequence[str]: A tuple of range types representing the range of each fluent.
def initialize_model(self, root_node):
    """Initialize the Model using the given root node.

    Wraps the swap in begin/endResetModel so attached views refresh, and
    re-enables the model triggers.

    :param root_node: Graph root node.
    :type root_node: DefaultNode
    :return: Method success
    :rtype: bool
    """
    LOGGER.debug("> Initializing model with '{0}' root node.".format(root_node))
    self.beginResetModel()
    self.root_node = root_node
    self.enable_model_triggers(True)
    self.endResetModel()
    return True
Initializes the Model using given root node. :param root_node: Graph root node. :type root_node: DefaultNode :return: Method success :rtype: bool
def mysql_batch_and_fetch(mysql_config, *sql_queries):
    """Execute a series of SQL statements, the last being the final SELECT.

    Accepts separate query arguments, a single semicolon-separated string,
    or a single list/tuple of queries.

    Fixes: ``result_table`` could be referenced before assignment when
    every query was empty (e.g. a lone ";" splitting into "" entries);
    it now defaults to an empty tuple.

    Parameters
    ----------
    mysql_config : dict
        Credentials as accepted by ``MySQLdb.connect``.
    sql_queries : list, tuple or str
        The SQL statements to run.

    Returns
    -------
    tuple
        The result table as a tuple of tuples.
    """
    import MySQLdb as mydb
    import sys
    import gc

    # normalise the three accepted calling conventions to a flat sequence
    if len(sql_queries) == 1:
        if isinstance(sql_queries[0], str):
            sql_queries = sql_queries[0].split(";")
        if isinstance(sql_queries[0], (list, tuple)):
            sql_queries = sql_queries[0]

    result_table = ()  # safe default when no query runs
    try:
        conn = mydb.connect(**mysql_config)
        curs = conn.cursor()
        for sql_query in sql_queries:
            if len(sql_query) > 0:
                curs.execute(sql_query)
        result_table = curs.fetchall()
    except mydb.Error as err:
        print(err)
        gc.collect()
        sys.exit(1)
    else:
        if conn:
            conn.close()
    gc.collect()
    return result_table
Excute a series of SQL statements before the final Select query Parameters ---------- mysql_config : dict The user credentials as defined in MySQLdb.connect, e.g. mysql_conig = {'user': 'myname', 'passwd': 'supersecret', 'host': '<ip adress or domain>', 'db': '<myschema>'} sql_queries : list or tuple A list or tuple of SQL queries wheras the last SQL command have to be final Select query. (If a string is provided the semicolon ";" is used to split the string into a list of strings) Returns ------- result_table : tuple The result table as tuple of tuples. Sources ------- * http://mysqlclient.readthedocs.io/user_guide.html
def _get_group_difference(self, sp_groups):
    """Compute the two-way group-name difference between db and stormpath.

    Helper returning ``(missing_from_db, missing_from_sp)``: names present
    on stormpath but absent locally, and vice versa.
    """
    local_names = set(Group.objects.all().values_list('name', flat=True))
    sp_names = set(sp_groups)
    return (sp_names - local_names, local_names - sp_names)
Helper method for gettings the groups that are present in the local db but not on stormpath and the other way around.
def _unquote_or_none(s: Optional[str]) -> Optional[bytes]:
    """None-safe wrapper around url_unescape.

    Unmatched optional regex groups arrive as None and are passed through.
    The value is unescaped as bytes (``encoding=None``) so the handler can
    decide what encoding to use.
    """
    if s is None:
        return None
    return url_unescape(s, encoding=None, plus=False)
None-safe wrapper around url_unescape to handle unmatched optional groups correctly. Note that args are passed as bytes so the handler can decide what encoding to use.
def filter_by_func(self, func:Callable)->'ItemList':
    "Only keep elements for which `func` returns `True`."
    # Rebuild `items` in place and return self so calls can be chained.
    self.items = array([o for o in self.items if func(o)])
    return self
Only keep elements for which `func` returns `True`.
def buffered_generator(source_gen, buffer_size=2, use_multiprocessing=False):
    r"""Generator that pre-computes items from a slow source in the background.

    A worker (thread by default, a process when ``use_multiprocessing``)
    pulls items from ``source_gen`` into a bounded queue so the consumer
    doesn't wait on each item.

    Fixes: terminating with ``raise StopIteration`` inside a generator is a
    ``RuntimeError`` under PEP 479 (Python 3.7+); a plain ``return`` is the
    correct way to finish.

    Args:
        source_gen (iterable): slow source generator.
        buffer_size (int): maximal number of items to pre-generate
            (length of the buffer); must be >= 2.
        use_multiprocessing (bool): if False, uses GIL-hindered threading
            instead of multiprocessing.  NOTE(review): the multiprocessing
            path seems to freeze when handed a plain generator.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer_ size is 2!")

    if use_multiprocessing:
        print('WARNING seems to freeze if passed in a generator')
        if False:
            # pool-based variant kept for reference; currently disabled
            pool = multiprocessing.Pool(processes=get_default_numprocs(),
                                        initializer=init_worker,
                                        maxtasksperchild=None)
            Process = pool.Process
        else:
            Process = multiprocessing.Process
        _Queue = multiprocessing.Queue
        target = _buffered_generation_process
    else:
        _Queue = queue.Queue
        Process = KillableThread
        target = _buffered_generation_thread

    # the worker puts `sentinal` on the queue once the source is exhausted
    buffer_ = _Queue(maxsize=buffer_size - 1)

    sentinal = StopIteration
    process = Process(
        target=target,
        args=(iter(source_gen), buffer_, sentinal)
    )
    process.daemon = True
    process.start()

    while True:
        output = buffer_.get()
        if output is sentinal:
            # PEP 479: `raise StopIteration` here would surface as a
            # RuntimeError on Python 3.7+; `return` ends iteration cleanly.
            return
        yield output
r""" Generator that runs a slow source generator in a separate process. My generate function still seems faster on test cases. However, this function is more flexible in its compatability. Args: source_gen (iterable): slow generator buffer_size (int): the maximal number of items to pre-generate (length of the buffer) (default = 2) use_multiprocessing (bool): if False uses GIL-hindered threading instead of multiprocessing (defualt = False). Note: use_multiprocessing = True seems to freeze if passed in a generator built by six.moves.map. References: Taken from Sander Dieleman's data augmentation pipeline https://github.com/benanne/kaggle-ndsb/blob/11a66cdbddee16c69514b9530a727df0ac6e136f/buffering.py CommandLine: python -m utool.util_parallel --test-buffered_generator:0 python -m utool.util_parallel --test-buffered_generator:1 Ignore: >>> #functime = timeit.timeit( >>> # 'ut.is_prime(' + str(prime) + ')', setup='import utool as ut', >>> # number=500) / 1000.0 Example: >>> # DISABLE_DOCTEST >>> # UNSTABLE_DOCTEST >>> from utool.util_parallel import * # NOQA >>> import utool as ut >>> num = 2 ** 14 >>> func = ut.is_prime >>> data = [38873] * num >>> data = list(range(num)) >>> with ut.Timer('serial') as t1: ... result1 = list(map(func, data)) >>> with ut.Timer('ut.generate2') as t3: ... result3 = list(ut.generate2(func, zip(data), chunksize=2, quiet=1, verbose=0)) >>> with ut.Timer('ut.buffered_generator') as t2: ... result2 = list(ut.buffered_generator(map(func, data))) >>> assert len(result1) == num and len(result2) == num and len(result3) == num >>> assert result3 == result2, 'inconsistent results' >>> assert result1 == result2, 'inconsistent results' Example1: >>> # DISABLE_DOCTEST >>> # VERYSLLOOWWW_DOCTEST >>> from utool.util_parallel import _test_buffered_generator >>> _test_buffered_generator2()
def _find_vm(name, data, quiet=False):
    """Scan the query data for the named VM.

    ``data`` maps hypervisor ids to info dicts.  Returns
    ``{hypervisor: {name: vm_info}}`` for the first match, or ``{}`` when
    the VM is not found.  Unless ``quiet``, fires a progress event with
    the result.
    """
    for hv_ in data:
        # skip non-dict entries (e.g. error strings from a hypervisor)
        if not isinstance(data[hv_], dict):
            continue
        if name in data[hv_].get('vm_info', {}):
            ret = {hv_: {name: data[hv_]['vm_info'][name]}}
            if not quiet:
                __jid_event__.fire_event({'data': ret, 'outputter': 'nested'}, 'progress')
            return ret
    return {}
Scan the query data for the named VM
def get_title(self):
    """Returns a string suitable for titling a window containing this table."""
    def _title(context_model):
        # Title for a single context: basename of the load path, with a
        # trailing '*' marking unsaved modifications.
        context = context_model.context()
        if context is None:
            return "new context*"
        title = os.path.basename(context.load_path) if context.load_path \
            else "new context"
        if context_model.is_modified():
            title += '*'
        return title

    if self.diff_mode:
        diff_title = _title(self.diff_context_model)
        if self.diff_from_source:
            diff_title += "'"  # prime marks a diff taken from source
        return "%s %s %s" % (_title(self.context_model), self.short_double_arrow, diff_title)
    else:
        return _title(self.context_model)
Returns a string suitable for titling a window containing this table.
def find_path(self, basename, install_dir=None):
    """Look in a few places for a file with the given name.

    Each directory in ``self.find_path_dirs`` is searched in order and the
    first existing match is returned.  Otherwise a path under
    ``install_dir`` (or ``self.preferred_install_dir``) is returned,
    whether or not it exists yet.
    """
    for directory in self.find_path_dirs:
        candidate = os.path.join(directory, basename)
        if os.path.exists(candidate):
            return candidate
    fallback_dir = install_dir or self.preferred_install_dir
    return os.path.join(fallback_dir, basename)
Look in a few places for a file with the given name. If a custom version of the file is found in the directory being managed by this workspace, return it. Otherwise look in the custom and default input directories in the root directory, and then finally in the root directory itself. This function makes it easy to provide custom parameters to any stage to the design pipeline. Just place the file with the custom parameters in a directory associated with that stage.
def Subclasses(cls, sort_by=None, reverse=False):
    """Get all nested Constant subclasses as (name, class) pairs.

    :param sort_by: attribute name used for sorting; defaults to
        ``__creation_index__`` (declaration order).
    :param reverse: if True, return in descending order.
    :returns: list of ``(attr_name, subclass)`` tuples.
    """
    l = list()
    for attr, value in get_all_attributes(cls):
        try:
            if issubclass(value, Constant):
                l.append((attr, value))
        except:  # issubclass raises TypeError for non-class attributes
            pass
    if sort_by is None:
        sort_by = "__creation_index__"
    l = list(
        sorted(l, key=lambda x: getattr(x[1], sort_by), reverse=reverse))
    return l
Get all nested Constant class and it's name pair. :param sort_by: the attribute name used for sorting. :param reverse: if True, return in descend order. :returns: [(attr, value),...] pairs. :: >>> class MyClass(Constant): ... a = 1 # non-class attributre ... b = 2 # non-class attributre ... ... class C(Constant): ... pass ... ... class D(Constant): ... pass >>> MyClass.Subclasses() [("C", MyClass.C), ("D", MyClass.D)] .. versionadded:: 0.0.3
def _check_input_names(symbol, names, typename, throw): args = symbol.list_arguments() for name in names: if name in args: continue candidates = [arg for arg in args if not arg.endswith('_weight') and not arg.endswith('_bias') and not arg.endswith('_gamma') and not arg.endswith('_beta')] msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \ "input with name '%s' is not found in symbol.list_arguments(). " \ "Did you mean one of:\n\t%s\033[0m"%( typename, str(names), name, '\n\t'.join(candidates)) if throw: raise ValueError(msg) else: warnings.warn(msg)
Check that all input names are in symbol's arguments.
def getRankMaps(self):
    """Return one rank-map dictionary per preference.

    Each dictionary associates a candidate's integer representation with
    that candidate's position in the ranking, starting from 1.
    """
    return [preference.getRankMap() for preference in self.preferences]
Returns a list of dictionaries, one for each preference, each associating the integer representation of a candidate with that candidate's position in the ranking, starting from 1.
def is_expired(self):
    """Indicates if the connection has expired.

    Sends a fresh ping when the last one is older than HB_PING_TIME,
    then reports expiry when no pong has arrived within the combined
    ping + pong window.
    """
    if time.time() - self.last_ping > HB_PING_TIME:
        self.ping()
    return (time.time() - self.last_pong) > HB_PING_TIME + HB_PONG_TIME
Indicates if connection has expired.
def cast_pars_dict(pars_dict):
    """Cast the bool and float elements of a parameters dict to python types.

    'free' entries become bools (via int), 'name' stays a string, and every
    other entry becomes a float.
    """
    def _cast(key, value):
        # per-key casting rule
        if key == 'free':
            return bool(int(value))
        if key == 'name':
            return value
        return float(value)

    return {pname: {k: _cast(k, v) for k, v in pdict.items()}
            for pname, pdict in pars_dict.items()}
Cast the bool and float elements of a parameters dict to the appropriate python types.
def filter_record(self, record):
    """Truncate ``record`` to at most ``self.max_length`` items."""
    if len(record) < self.max_length:
        return record
    return record[:self.max_length]
Filter record, truncating any over some maximum length
def mutateSequence(seq, distance):
    """Mutates the DNA sequence for use in testing.

    Per base: substitute with probability ``distance``, and apply an
    insertion or deletion each with probability ``0.05 * distance``.
    Indel lengths come from ``_expLength`` with continuation probability
    0.9 (geometric-like).
    """
    subProb=distance
    inProb=0.05*distance
    deProb=0.05*distance
    contProb=0.9
    l = []
    bases = [ 'A', 'C', 'T', 'G' ]
    i=0
    while i < len(seq):
        if random.random() < subProb:
            # substitution may pick the original base again, so the
            # effective substitution rate is lower than subProb
            l.append(random.choice(bases))
        else:
            l.append(seq[i])
        if random.random() < inProb:
            # insertion; getRandomSequence presumably returns a
            # (header, sequence) pair and [1] is the sequence -- TODO confirm
            l += getRandomSequence(_expLength(0, contProb))[1]
        if random.random() < deProb:
            # deletion: skip over a random number of following bases
            i += int(_expLength(0, contProb))
        i += 1
    return "".join(l)
Mutates the DNA sequence for use in testing.
def read_stdout(self):
    """Read and return the standard output of the QEMU process.

    Only use when the process has been stopped or has crashed.  Returns
    an empty string when no stdout file is set or it cannot be read.
    """
    if not self._stdout_file:
        return ""
    try:
        with open(self._stdout_file, "rb") as stdout_fd:
            return stdout_fd.read().decode("utf-8", errors="replace")
    except OSError as e:
        log.warning("Could not read {}: {}".format(self._stdout_file, e))
        return ""
Reads the standard output of the QEMU process. Only use when the process has been stopped or has crashed.
def searchInAleph(base, phrase, considerSimilar, field):
    """Send request to the Aleph search engine.

    The response itself doesn't contain records; its ``set_number`` is
    later used (e.g. by ``getDocumentIDs``) to fetch them.

    Args:
        base (str): which database you want to use
        phrase (str): what do you want to search
        considerSimilar (bool): fuzzy search flag (maps to SIMILAR=Y/N)
        field (str): where you want to look (see VALID_ALEPH_FIELDS)

    Returns:
        dict: parsed <find> response plus the queried ``base``.  An Aleph
        "empty set" answer is not an error: ``no_entries`` is set to 0.

    Raises:
        AlephException: if Aleph doesn't return any information or
            reports any other error
        InvalidAlephFieldException: if the specified field is not valid
    """
    downer = Downloader()
    if field.lower() not in VALID_ALEPH_FIELDS:
        raise InvalidAlephFieldException("Unknown field '" + field + "'!")
    # build the GET query from the URL template
    param_url = Template(SEARCH_URL_TEMPLATE).substitute(
        PHRASE=quote_plus(phrase),
        BASE=base,
        FIELD=field,
        SIMILAR="Y" if considerSimilar else "N"
    )
    result = downer.download(ALEPH_URL + param_url)
    dom = dhtmlparser.parseString(result)
    find = dom.find("find")  # the <find> element carries the result metadata
    if len(find) <= 0:
        raise AlephException("Aleph didn't returned any information.")
    find = find[0]
    result = _alephResultToDict(find)
    result["base"] = base
    if "error" not in result:
        return result
    # "empty set" means no matches -- a normal outcome, not a failure
    if result["error"] == "empty set":
        result["no_entries"] = 0
        return result
    else:
        raise AlephException(result["error"])
Send request to the aleph search engine. Request itself is pretty useless, but it can be later used as parameter for :func:`getDocumentIDs`, which can fetch records from Aleph. Args: base (str): which database you want to use phrase (str): what do you want to search considerSimilar (bool): fuzzy search, which is not working at all, so don't use it field (str): where you want to look (see: :attr:`VALID_ALEPH_FIELDS`) Returns: dictionary: consisting from following fields: | error (optional): present if there was some form of error | no_entries (int): number of entries that can be fetch from aleph | no_records (int): no idea what is this, but it is always >= than `no_entries` | set_number (int): important - something like ID of your request | session-id (str): used to count users for licensing purposes Example: Returned dict:: { 'session-id': 'YLI54HBQJESUTS678YYUNKEU4BNAUJDKA914GMF39J6K89VSCB', 'set_number': 36520, 'no_records': 1, 'no_entries': 1 } Raises: AlephException: if Aleph doesn't return any information InvalidAlephFieldException: if specified field is not valid
def respond_client(self, answer, socket):
    """Send a pickled answer to the client, then close the connection.

    The socket is also removed from the select read list so it is no
    longer polled.
    """
    response = pickle.dumps(answer, -1)  # -1: highest pickle protocol
    socket.sendall(response)
    self.read_list.remove(socket)
    socket.close()
Send an answer to the client.
def _read_message(self):
    """Read one length-prefixed CastMessage from the socket.

    Wire format: a 4-byte big-endian unsigned payload length, followed by
    the serialized protobuf message of that length.
    """
    # 4-byte big-endian unsigned length prefix
    payload_info = self._read_bytes_from_socket(4)
    read_len = unpack(">I", payload_info)[0]
    payload = self._read_bytes_from_socket(read_len)
    message = cast_channel_pb2.CastMessage()
    message.ParseFromString(payload)
    return message
Reads a message from the socket and converts it to a message.
def split_css_classes(css_classes):
    """Turn a space-separated string into a list of non-empty CSS classes."""
    return [cls for cls in text_value(css_classes).split(" ") if cls]
Turn string into a list of CSS classes
def set_property_filter(filter_proto, name, op, value):
    """Set property filter constraint in the given datastore.Filter proto message.

    Args:
        filter_proto: datastore.Filter proto message (cleared first)
        name: property name
        op: datastore.PropertyFilter.Operation
        value: property value

    Returns:
        the same datastore.Filter, for chaining.

    Usage:
        >>> set_property_filter(filter_proto, 'foo',
        ...     datastore.PropertyFilter.EQUAL, 'a')  # WHERE 'foo' = 'a'
    """
    filter_proto.Clear()
    pf = filter_proto.property_filter
    pf.property.name = name
    pf.op = op
    set_value(pf.value, value)
    return filter_proto
Set property filter contraint in the given datastore.Filter proto message. Args: filter_proto: datastore.Filter proto message name: property name op: datastore.PropertyFilter.Operation value: property value Returns: the same datastore.Filter. Usage: >>> set_property_filter(filter_proto, 'foo', ... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
def _get_object_pydoc_page_name(obj):
    """Return the fully qualified pydoc page name for ``obj``.

    The module prefix is stripped for built-ins.  May return None when
    ``fullqualname`` cannot determine a qualified name.
    """
    page_name = fullqualname.fullqualname(obj)
    if page_name is not None:
        page_name = _remove_builtin_prefix(page_name)
    return page_name
Returns fully qualified name, including module name, except for the built-in module.
def filter_by_transcript_expression(
        self,
        transcript_expression_dict,
        min_expression_value=0.0):
    """Filter variants to those overlapping a sufficiently expressed transcript.

    A variant is kept when any of its transcript IDs has an expression
    value above ``min_expression_value`` in the given dict.

    Parameters
    ----------
    transcript_expression_dict : dict
        Dictionary mapping Ensembl transcript IDs to expression estimates
        (either FPKM or TPM)

    min_expression_value : float
        Threshold above which we'll keep an effect in the result collection
    """
    return self.filter_any_above_threshold(
        multi_key_fn=lambda variant: variant.transcript_ids,
        value_dict=transcript_expression_dict,
        threshold=min_expression_value)
Filters variants down to those which have overlap a transcript whose expression value in the transcript_expression_dict argument is greater than min_expression_value. Parameters ---------- transcript_expression_dict : dict Dictionary mapping Ensembl transcript IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
def options(self, **options):
    """Adds output options for the underlying data source.

    You can set the following option(s) for writing files:
        * ``timeZone``: sets the string that indicates a timezone to be
          used to format timestamps in the JSON/CSV datasources or
          partition values.  If it isn't set, it uses the default value,
          session local timezone.

    Returns self so calls can be chained.
    """
    # iterate key/value pairs directly instead of indexing by key
    for key, value in options.items():
        self._jwrite = self._jwrite.option(key, to_str(value))
    return self
Adds output options for the underlying data source. You can set the following option(s) for writing files: * ``timeZone``: sets the string that indicates a timezone to be used to format timestamps in the JSON/CSV datasources or partition values. If it isn't set, it uses the default value, session local timezone.
def install_egg(self, egg_name):
    """Install an egg into the egg directory via pip's requirement set.

    Creates the egg directory if needed.  On DistributionNotFound the
    requirement is rolled back and a PipException raised.
    """
    if not os.path.exists(self.egg_directory):
        os.makedirs(self.egg_directory)
    self.requirement_set.add_requirement(
        InstallRequirement.from_line(egg_name, None))
    try:
        # resolve, download and install into our private egg directory
        self.requirement_set.prepare_files(self.finder)
        self.requirement_set.install(['--prefix=' + self.egg_directory], [])
    except DistributionNotFound:
        # undo the registration so a retry doesn't see a stale entry
        self.requirement_set.requirements._keys.remove(egg_name)
        raise PipException()
Install an egg into the egg directory
def set_children(self, child_ids):
    """Sets the children.

    arg:    child_ids (osid.id.Id[]): the children``Ids``
    raise:  InvalidArgument - ``child_ids`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if not isinstance(child_ids, list):
        raise errors.InvalidArgument()
    if self.get_children_metadata().is_read_only():
        raise errors.NoAccess()
    idstr_list = []
    for object_id in child_ids:
        if not self._is_valid_id(object_id):
            raise errors.InvalidArgument()
        # de-duplicate while preserving first-seen order
        if str(object_id) not in idstr_list:
            idstr_list.append(str(object_id))
    self._my_map['childIds'] = idstr_list
Sets the children. arg: child_ids (osid.id.Id[]): the children``Ids`` raise: InvalidArgument - ``child_ids`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def list_compatible_canvas_layers(self):
    """Fill the list widget with hazard-compatible canvas layers.

    Each item stores the layer id in its UserRole data; layers without
    keywords are rendered in italics.
    """
    italic_font = QFont()
    italic_font.setItalic(True)
    list_widget = self.lstCanvasHazLayers
    list_widget.clear()
    for layer in self.parent.get_compatible_canvas_layers('hazard'):
        item = QListWidgetItem(layer['name'], list_widget)
        item.setData(Qt.UserRole, layer['id'])
        if not layer['keywords']:
            # italics flag layers missing keyword metadata
            item.setFont(italic_font)
        list_widget.addItem(item)
Fill the list widget with compatible layers. :returns: Metadata of found layers. :rtype: list of dicts
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    # map the lock mode to the matching pg advisory unlock function
    lock_fxn = _lock_fxn("unlock", lock_mode, False)
    return dax.get_scalar(
        dax.callproc(lock_fxn,
                     key if isinstance(key, (list, tuple)) else [key])[0])
Manually release a pg advisory lock. :dax: a DataAccess instance :key: either a big int or a 2-tuple of integers :lock_mode: a member of the LockMode enum