code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def push_uci(self, uci: str) -> Move:
    """Parse *uci* as a UCI move, play it on the move stack, and return it.

    :raises: :exc:`ValueError` if the move is invalid or illegal in the
        current position (but not a null move).
    """
    parsed = self.parse_uci(uci)
    self.push(parsed)
    return parsed
Parses a move in UCI notation and puts it on the move stack. Returns the move. :raises: :exc:`ValueError` if the move is invalid or illegal in the current position (but not a null move).
def lemma(lemma_key):
    """Return the (memoized) Lemma object for *lemma_key*.

    The key has the form ``<name>.<pos>.<nn>.<literal>``: the first three
    dot-separated parts identify the synset, the fourth is the literal.
    """
    try:
        return LEMMAS_DICT[lemma_key]
    except KeyError:
        pass
    parts = lemma_key.split('.')
    new_lemma = Lemma('.'.join(parts[:3]), parts[3])
    LEMMAS_DICT[lemma_key] = new_lemma
    return new_lemma
Returns the Lemma object with the given key. Parameters ---------- lemma_key : str Key of the returned lemma. Returns ------- Lemma Lemma matching the `lemma_key`.
def parent_org_sdo_ids(self):
    """The SDO IDs of the compositions this RTC belongs to."""
    ids = []
    for sdo in self._obj.get_organizations():
        if sdo:
            owner = sdo.get_owner()._narrow(SDOPackage.SDO)
            ids.append(owner.get_sdo_id())
    return ids
The SDO IDs of the compositions this RTC belongs to.
def _on_shortcut_changed(self, renderer, path, new_shortcuts):
    """Callback handling a change of a shortcut.

    :param Gtk.CellRenderer renderer: Cell renderer showing the shortcut
    :param path: Path of shortcuts within the list store
    :param str new_shortcuts: New shortcuts, as the repr of a list of strings
    """
    action = self.shortcut_list_store[int(path)][self.KEY_STORAGE_ID]
    old_shortcuts = self.gui_config_model.get_current_config_value(
        "SHORTCUTS", use_preliminary=True)[action]

    from ast import literal_eval
    try:
        new_shortcuts = literal_eval(new_shortcuts)
        # Reject anything that is not a list consisting solely of strings.
        # Bug fix: the original combined the two checks with `and`, which
        # let non-lists and lists of non-strings slip through validation.
        if not isinstance(new_shortcuts, list) or \
                not all(isinstance(shortcut, string_types) for shortcut in new_shortcuts):
            raise ValueError()
    except (ValueError, SyntaxError):
        logger.warning("Shortcuts must be a list of strings")
        new_shortcuts = old_shortcuts

    shortcuts = self.gui_config_model.get_current_config_value(
        "SHORTCUTS", use_preliminary=True, default={})
    shortcuts[action] = new_shortcuts
    self.gui_config_model.set_preliminary_config_value("SHORTCUTS", shortcuts)
    self._select_row_by_column_value(self.view['shortcut_tree_view'],
                                     self.shortcut_list_store,
                                     self.KEY_STORAGE_ID, action)
Callback handling a change of a shortcut :param Gtk.CellRenderer renderer: Cell renderer showing the shortcut :param path: Path of shortcuts within the list store :param str new_shortcuts: New shortcuts
def mtxvg(m1, v2, ncol1, nr1r2):
    """Multiply the transpose of a matrix and a vector of arbitrary size.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxvg_c.html

    :param m1: Left-hand matrix to be multiplied (NxM floats).
    :param v2: Right-hand vector to be multiplied.
    :param ncol1: Column dimension of m1 and length of the result.
    :param nr1r2: Row dimension of m1 and length of v2.
    :return: Product vector m1 transpose * v2.
    """
    ncols = ctypes.c_int(ncol1)
    nrows = ctypes.c_int(nr1r2)
    matrix = stypes.toDoubleMatrix(m1)
    vector = stypes.toDoubleVector(v2)
    result = stypes.emptyDoubleVector(ncols.value)
    libspice.mtxvg_c(matrix, vector, ncols, nrows, result)
    return stypes.cVectorToPython(result)
Multiply the transpose of a matrix and a vector of arbitrary size. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxvg_c.html :param m1: Left-hand matrix to be multiplied. :type m1: NxM-Element Array of floats :param v2: Right-hand vector to be multiplied. :type v2: Array of floats :param ncol1: Column dimension of m1 and length of vout. :type ncol1: int :param nr1r2: Row dimension of m1 and length of v2. :type nr1r2: int :return: Product vector m1 transpose * v2. :rtype: Array of floats
def get_model(app_label, model_name):
    """Fetch a Django model using the app registry.

    This doesn't require that an app with the given app label exists, which
    makes it safe to call when the registry is being populated. Raises
    LookupError if the model isn't found.
    """
    try:
        from django.apps import apps
        from django.core.exceptions import AppRegistryNotReady
    except ImportError:
        # Django < 1.7: fall back to the legacy models API.
        from django.db import models
        return models.get_model(app_label, model_name)
    try:
        return apps.get_model(app_label, model_name)
    except AppRegistryNotReady:
        if not (apps.apps_ready and not apps.models_ready):
            raise
        # Apps are loaded but models aren't yet: import the app's models
        # module directly and look the model up in the registry.
        app_config = apps.get_app_config(app_label)
        import_module("%s.%s" % (app_config.name, "models"))
        return apps.get_registered_model(app_label, model_name)
Fetches a Django model using the app registry. This doesn't require that an app with the given app label exists, which makes it safe to call when the registry is being populated. All other methods to access models might raise an exception about the registry not being ready yet. Raises LookupError if model isn't found.
def nanstd(values, axis=None, skipna=True, ddof=1, mask=None):
    """Compute the standard deviation along the given axis, ignoring NaNs.

    Parameters
    ----------
    values : ndarray
    axis : int, optional
    skipna : bool, default True
    ddof : int, default 1
        Delta Degrees of Freedom; the divisor used is N - ddof.
    mask : ndarray[bool], optional
        nan-mask if known

    Returns
    -------
    result : float
        Unless input is a float array, in which case the input's precision
        is used.
    """
    variance = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
    return _wrap_results(np.sqrt(variance), values.dtype)
Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0
def explode(col):
    """Return a new row for each element in the given array or map.

    Uses the default column name `col` for array elements and `key`/`value`
    for map elements unless specified otherwise.
    """
    ctx = SparkContext._active_spark_context
    java_col = ctx._jvm.functions.explode(_to_java_column(col))
    return Column(java_col)
Returns a new row for each element in the given array or map. Uses the default column name `col` for elements in the array and `key` and `value` for elements in the map unless specified otherwise. >>> from pyspark.sql import Row >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})]) >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect() [Row(anInt=1), Row(anInt=2), Row(anInt=3)] >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show() +---+-----+ |key|value| +---+-----+ | a| b| +---+-----+
def variable_summaries(vars_, groups=None, scope='weights'):
    """Create histogram summaries for the provided variables.

    Summaries can be grouped via regexes matching variable names.

    Args:
      vars_: List of variables to summarize.
      groups: Mapping of name to regex for grouping summaries.
      scope: Name scope for this operation.

    Returns:
      Summary tensor.
    """
    groups = groups or {r'all': r'.*'}
    grouped = collections.defaultdict(list)
    for var in vars_:
        for name, pattern in groups.items():
            if re.match(pattern, var.name):
                grouped[re.sub(pattern, name, var.name)].append(var)
    for name in groups:
        if name not in grouped:
            tf.logging.warn("No variables matching '{}' group.".format(name))
    summaries = []
    for name, group_vars in grouped.items():
        flattened = tf.concat([tf.reshape(v, [-1]) for v in group_vars], 0)
        summaries.append(tf.summary.histogram(scope + '/' + name, flattened))
    return tf.summary.merge(summaries)
Create histogram summaries for the provided variables. Summaries can be grouped via regexes matching variables names. Args: vars_: List of variables to summarize. groups: Mapping of name to regex for grouping summaries. scope: Name scope for this operation. Returns: Summary tensor.
def read_name(source, position):
    """Read an alphanumeric + underscore name from the source.

    [_A-Za-z][_0-9A-Za-z]*
    """
    body = source.body
    size = len(body)
    end = position + 1
    while end != size:
        code = char_code_at(body, end)
        is_name_char = code is not None and (
            code == 95 or            # _
            48 <= code <= 57 or      # 0-9
            65 <= code <= 90 or      # A-Z
            97 <= code <= 122        # a-z
        )
        if not is_name_char:
            break
        end += 1
    return Token(TokenKind.NAME, position, end, body[position:end])
Reads an alphanumeric + underscore name from the source. [_A-Za-z][_0-9A-Za-z]*
def get_lat_lon_time_from_nmea(nmea_file, local_time=True):
    """Read location and time stamps from a track in a NMEA file.

    Returns a list of (timestamp, lat, lon, altitude) tuples sorted by time.

    NOTE(review): the ``local_time`` parameter is currently unused; the
    timestamps come straight from the NMEA sentences — confirm intent.
    """
    with open(nmea_file, "r") as f:
        lines = f.readlines()
        lines = [l.rstrip("\n\r") for l in lines]

    # Get the date from the first GPRMC sentence (GPGGA has no date field).
    # NOTE(review): if the file contains no GPRMC sentence before the first
    # GPGGA one, ``date`` is unbound below and a NameError is raised.
    for l in lines:
        if "GPRMC" in l:
            data = pynmea2.parse(l)
            date = data.datetime.date()
            break

    points = []
    for l in lines:
        if "GPRMC" in l:
            # Keep the date current; it can roll over at midnight mid-track.
            data = pynmea2.parse(l)
            date = data.datetime.date()
        if "$GPGGA" in l:
            data = pynmea2.parse(l)
            timestamp = datetime.datetime.combine(date, data.timestamp)
            lat, lon, alt = data.latitude, data.longitude, data.altitude
            points.append((timestamp, lat, lon, alt))

    points.sort()
    return points
Read location and time stamps from a track in a NMEA file. Returns a list of tuples (time, lat, lon, altitude). GPX stores time in UTC; by default we assume your camera used the local time and convert accordingly.
def RebuildHttpConnections(http):
    """Drop all live connections cached on an httplib2.Http instance.

    httplib2 overloads ``http.connections`` with two kinds of entries:
    ``{scheme: connection class}`` and ``{scheme + ':' + authority: live
    connection}``. Removing the latter (keys containing ':') forces
    httplib2 to rebuild the connections from the connection classes on
    the next request.

    Args:
      http: An httplib2.Http instance.
    """
    connections = getattr(http, 'connections', None)
    if not connections:
        return
    stale_keys = [key for key in connections if ':' in key]
    for key in stale_keys:
        del connections[key]
Rebuilds all http connections in the httplib2.Http instance. httplib2 overloads the map in http.connections to contain two different types of values: { scheme string: connection class } and { scheme + authority string : actual http connection } Here we remove all of the entries for actual connections so that on the next request httplib2 will rebuild them from the connection types. Args: http: An httplib2.Http instance.
def MakeRequest(self, data):
    """Make a HTTP Post request to the server 'control' endpoint.

    Sends *data* to the server and tracks sent/received byte counters.
    A 406 response means the server does not know this client yet, so
    enrolment is initiated before returning.
    """
    stats_collector_instance.Get().IncrementCounter("grr_client_sent_bytes",
                                                    len(data))
    response = self.http_manager.OpenServerEndpoint(
        path="control?api=%s" % config.CONFIG["Network.api"],
        verify_cb=self.VerifyServerControlResponse,
        data=data,
        headers={"Content-Type": "binary/octet-stream"})

    if response.code == 406:
        # Server rejected us as unknown: kick off enrolment.
        self.InitiateEnrolment()
        return response

    if response.code == 200:
        # Only successful responses count towards received-bytes stats.
        stats_collector_instance.Get().IncrementCounter(
            "grr_client_received_bytes", len(response.data))
        return response

    # Any other status code is returned to the caller unchanged.
    return response
Make a HTTP Post request to the server 'control' endpoint.
def compute_norrec_differences(df, keys_diff):
    """DO NOT USE ANY MORE - DEPRECATED!

    NOTE(review): the unconditional ``raise`` on the first line makes
    everything below unreachable; it is retained only for reference.
    """
    raise Exception('This function is depreciated!')
    # --- dead code below this point ---
    print('computing normal-reciprocal differences')

    def norrec_diff(x):
        # Absolute difference of a normal/reciprocal pair; NaN unless
        # exactly two measurements are present.
        if x.shape[0] != 2:
            return np.nan
        else:
            return np.abs(x.iloc[1] - x.iloc[0])

    keys_keep = list(set(df.columns.tolist()) - set(keys_diff))
    agg_dict = {x: _first for x in keys_keep}
    agg_dict.update({x: norrec_diff for x in keys_diff})
    # Grouping keys must not also be aggregated.
    for key in ('id', 'timestep', 'frequency'):
        if key in agg_dict:
            del(agg_dict[key])

    df = df.groupby(('timestep', 'frequency', 'id')).agg(agg_dict)
    # NOTE(review): reset_index() returns a new frame; this call is a no-op.
    df.reset_index()
    return df
DO NOT USE ANY MORE - DEPRECATED!
def create(
        self, order_increment_id, creditmemo_data=None, comment=None,
        email=False, include_comment=False,
        refund_to_store_credit_amount=None):
    """Create a new credit memo for an order.

    :param order_increment_id: Order Increment ID (str)
    :param creditmemo_data: optional sales order credit memo data dict
    :param comment: optional credit memo comment (defaults to '')
    :param email: send e-mail flag
    :param include_comment: include comment in e-mail flag
    :param refund_to_store_credit_amount: amount to refund to store credit
    :return: str, increment id of the credit memo created
    """
    args = [
        order_increment_id,
        creditmemo_data,
        '' if comment is None else comment,
        email,
        include_comment,
        refund_to_store_credit_amount,
    ]
    return self.call('sales_order_creditmemo.create', args)
Create new credit_memo for order :param order_increment_id: Order Increment ID :type order_increment_id: str :param creditmemo_data: Sales order credit memo data (optional) :type creditmemo_data: associative array as dict { 'qtys': [ { 'order_item_id': str, # Order item ID to be refunded 'qty': int # Items quantity to be refunded }, ... ], 'shipping_amount': float # refund shipping amount (optional) 'adjustment_positive': float # adjustment refund amount (optional) 'adjustment_negative': float # adjustment fee amount (optional) } :param comment: Credit memo Comment :type comment: str :param email: send e-mail flag (optional) :type email: bool :param include_comment: include comment in e-mail flag (optional) :type include_comment: bool :param refund_to_store_credit_amount: amount to refund to store credit :type refund_to_store_credit_amount: float :return str, increment id of credit memo created
def find_d2ifile(flist, detector):
    """Search a list of files for one matching the specified detector.

    Returns the last file whose DETECTOR keyword equals *detector*,
    or None if no file matches.
    """
    match = None
    for fname in flist:
        keyword = fits.getval(fname, 'detector', memmap=False)
        if keyword == detector:
            match = fname
    return match
Search a list of files for one that matches the detector specified.
def get_ride_details(api_client, ride_id):
    """Use an UberRidesClient to get ride details and print the results.

    Parameters
        api_client (UberRidesClient)
            An authorized UberRidesClient with 'request' scope.
        ride_id (str)
            Unique ride identifier.
    """
    try:
        details = api_client.get_ride_details(ride_id)
    except (ClientError, ServerError) as error:
        fail_print(error)
    else:
        success_print(details.json)
Use an UberRidesClient to get ride details and print the results. Parameters api_client (UberRidesClient) An authorized UberRidesClient with 'request' scope. ride_id (str) Unique ride identifier.
def get_python_version(path):
    """Return the version string of the Python interpreter at *path*.

    :raises InvalidPythonVersion: if *path* cannot be executed or prints
        no version.
    """
    version_cmd = [path, "-c", "import sys; print(sys.version.split()[0])"]
    try:
        result = vistir.misc.run(
            version_cmd,
            block=True,
            nospin=True,
            return_object=True,
            combine_stderr=False,
            write_to_stdout=False,
        )
    except OSError:
        raise InvalidPythonVersion("%s is not a valid python path" % path)
    if result.out:
        return result.out.strip()
    raise InvalidPythonVersion("%s is not a valid python path" % path)
Get python version string using subprocess from a given path.
def serialise(self, default_endianness=None):
    """Serialise a message, without including any framing.

    :param default_endianness: The default endianness, unless overridden by
        the fields or class metadata. Should usually be left at ``None``.
        Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian.
    :type default_endianness: str
    :return: The serialised message.
    :rtype: bytes
    """
    endianness = (default_endianness or DEFAULT_ENDIANNESS)
    if hasattr(self, '_Meta'):
        # Class metadata may override the requested endianness.
        endianness = self._Meta.get('endianness', endianness)

    # Clear all fields whose values are inferred from other fields so that
    # prepare() below recomputes them from scratch.
    inferred_fields = set()
    for k, v in iteritems(self._type_mapping):
        inferred_fields |= {x._name for x in v.dependent_fields()}
    for field in inferred_fields:
        setattr(self, field, None)

    # Let every field update dependent values (e.g. length fields).
    for k, v in iteritems(self._type_mapping):
        v.prepare(self, getattr(self, k))

    # Concatenate the serialised representation of each field, in order.
    message = b''
    for k, v in iteritems(self._type_mapping):
        message += v.value_to_bytes(self, getattr(self, k),
                                    default_endianness=endianness)
    return message
Serialise a message, without including any framing. :param default_endianness: The default endianness, unless overridden by the fields or class metadata. Should usually be left at ``None``. Otherwise, use ``'<'`` for little endian and ``'>'`` for big endian. :type default_endianness: str :return: The serialised message. :rtype: bytes
def create(self, parameters=None, create_keys=True, **kwargs):
    """Create the service.

    :param parameters: optional dict of service parameters; merged with the
        service config and saved to settings when *create_keys* is True.
    :param create_keys: whether to persist the service configuration.
    """
    # Bug fix: a mutable default ({}) was previously used and then mutated
    # below via cfg.update(...), leaking config between calls (and into the
    # caller's dict). Use None and copy instead.
    if parameters is None:
        parameters = {}
    self._create_service(parameters=parameters, **kwargs)
    if create_keys:
        cfg = dict(parameters)
        cfg.update(self._get_service_config())
        self.settings.save(cfg)
Create the service.
def is_filter_tuple(tup):
    """Return whether *tup* matches the format for a column filter:
    a 3-element tuple/list of (column name, callable, value)."""
    if not isinstance(tup, (tuple, list)):
        return False
    return (len(tup) == 3
            and isinstance(tup[0], string_types)
            and callable(tup[1]))
Return whether a `tuple` matches the format for a column filter
def _get_path_entry_from_string(self, query_string, first_found=True, full_path=False):
    """Find config entries matching *query_string* in the config contents.

    :param query_string: str, query string we are looking for
    :param first_found: bool, return the first found entry or the whole
        generator of matches
    :param full_path: bool, whether to return each entry with its
        corresponding config entry path
    :raises: errors.ResourceNotFoundError if nothing matches
    """
    matches = gen_dict_key_matches(query_string, self.config_file_contents,
                                   full_path=full_path)
    try:
        if first_found:
            return next(matches)
        return matches
    except (StopIteration, TypeError):
        raise errors.ResourceNotFoundError(
            'Could not find search string %s in the config file contents %s'
            % (query_string, self.config_file_contents))
Parses a string to form a list of strings that represents a possible config entry header :param query_string: str, query string we are looking for :param first_found: bool, return first found entry or entire list :param full_path: bool, whether to return each entry with their corresponding config entry path :return: (Generator((list, str, dict, OrderedDict)), config entries that match the query string :raises: exceptions.ResourceNotFoundError
def load_module(self, filename):
    """Load a benchmark module from file."""
    if not isinstance(filename, string_types):
        # Already a module object (or similar): pass it through unchanged.
        return filename
    stem = os.path.splitext(os.path.basename(filename))[0]
    stem = stem.replace('.bench', '')
    return load_module('benchmarks.{0}'.format(stem), filename)
Load a benchmark module from file
def harvest(self):
    """Start the harvesting process and return the resulting job.

    Items are processed (and the run finalized) only when initialization
    reports something other than None.
    """
    initialized = self.perform_initialization()
    if initialized is not None:
        self.process_items()
        self.finalize()
    return self.job
Start the harvesting process
def fetchText(cls, url, data, textSearch, optional):
    """Search text entry for given text XPath in a HTML page.

    Returns the unescaped, stripped match text; None when *textSearch* is
    empty or (with *optional*) nothing matched; raises ValueError when a
    required search matches nothing.
    """
    # Use CSS selectors or raw XPath depending on the scraper class.
    if cls.css:
        searchFun = data.cssselect
    else:
        searchFun = data.xpath
    if textSearch:
        text = ''
        for match in searchFun(textSearch):
            try:
                text += ' ' + match.text_content()
            except AttributeError:
                # Plain strings/attribute results have no text_content().
                # NOTE(review): `unicode` implies this code targets Python 2.
                text += ' ' + unicode(match)
        if text.strip() == '':
            if optional:
                return None
            else:
                raise ValueError("XPath %s did not match anything at URL %s." %
                                 (textSearch, url))
        out.debug(u'Matched text %r with XPath %s' % (text, textSearch))
        return unescape(text).strip()
    else:
        return None
Search text entry for given text XPath in a HTML page.
def deleteCertificate(self, certName):
    """Delete an SSL certificate from the key store.

    Once a certificate is deleted, it cannot be retrieved or used to
    enable SSL.

    Inputs:
       certName - name of the cert to delete
    """
    url = self._url + "/sslCertificates/{cert}/delete".format(cert=certName)
    return self._post(url=url,
                      param_dict={"f": "json"},
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
This operation deletes an SSL certificate from the key store. Once a certificate is deleted, it cannot be retrieved or used to enable SSL. Inputs: certName - name of the cert to delete
def to_networkx(self):
    """Return this network as a NetworkX graph object.

    :return: Network as NetworkX graph object
    """
    response = self.session.get(self.__url)
    return nx_util.to_networkx(response.json())
Return this network in NetworkX graph object. :return: Network as NetworkX graph object
def main(port=8222):
    """Example that starts the REPL through an SSH server."""
    print('Listening on :%i' % port)
    print('To connect, do "ssh localhost -p %i"' % port)

    environ = {'hello': 'world'}

    def server_factory():
        return MySSHServer(lambda: environ)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncssh.create_server(server_factory, '', port,
                               server_host_keys=['/etc/ssh/ssh_host_dsa_key']))
    loop.run_forever()
Example that starts the REPL through an SSH server.
def get_client_class(self, client_class_name):
    """Return details for a specific client class from the CPNR server."""
    url = self._build_url(['ClientClass', client_class_name])
    return self._do_request('GET', url)
Returns a specific client class details from CPNR server.
def build_strings(self):
    """Build the call string for each device.

    For device ``d`` at index ``idx`` stores ``system.d.<fn>(system.dae)``
    (with a trailing newline) into the matching call-string container.
    """
    call_map = (('gcalls', 'gcall'),
                ('fcalls', 'fcall'),
                ('gycalls', 'gycall'),
                ('fxcalls', 'fxcall'),
                ('jac0s', 'jac0'))
    for idx, dev in enumerate(self.devices):
        prefix = 'system.' + dev
        for attr, fn in call_map:
            getattr(self, attr)[idx] = '{}.{}(system.dae)\n'.format(prefix, fn)
build call string for each device
def _cancel_orphan_orders(self, orderId): orders = self.ibConn.orders for order in orders: order = orders[order] if order['parentId'] != orderId: self.ibConn.cancelOrder(order['id'])
cancel child orders when parent is gone
def update(self, app_model, forbidden_keys=None, inverse=False):
    """Update the raw model.

    Consult `zsl.utils.model_helper.update_model`.
    """
    keys_to_skip = [] if forbidden_keys is None else forbidden_keys
    update_model(self, app_model, keys_to_skip, inverse)
Updates the raw model. Consult `zsl.utils.model_helper.update_model`.
def ws_db996(self, value=None):
    """Set IDD Field `ws_db996`.

    Mean wind speed coincident with 99.6% dry-bulb temperature (m/s).

    Args:
        value (float): value for IDD Field `ws_db996`. ``None`` means a
            missing value and is stored without checking.

    Raises:
        ValueError: if `value` cannot be converted to float
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `ws_db996`'.format(value))
    self._ws_db996 = value
Corresponds to IDD Field `ws_db996` Mean wind speed coincident with 99.6% dry-bulb temperature Args: value (float): value for IDD Field `ws_db996` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def get_first_rec(fastafile):
    """Return the first record in the fastafile."""
    records = list(SeqIO.parse(fastafile, "fasta"))
    if len(records) > 1:
        logging.debug("{0} records found in {1}, using the first one".
                      format(len(records), fastafile))
    return records[0]
Returns the first record in the fastafile
def sizeOfOverlap(self, e):
    """Get the size of the overlap between self and e.

    :return: the number of bases shared in common between self and e
        (0 when they do not intersect).
    """
    if not self.intersects(e):
        return 0
    # One interval fully contains the other.
    if e.start >= self.start and e.end <= self.end:
        return len(e)
    if self.start >= e.start and self.end <= e.end:
        return len(self)
    # Partial overlap: measure from the later start to the earlier end.
    if e.start > self.start:
        return self.end - e.start
    if self.start > e.start:
        return e.end - self.start
Get the size of the overlap between self and e. :return: the number of bases that are shared in common between self and e.
def buffer(self):
    """Context manager to temporarily buffer the output.

    :raise RuntimeError: If two :meth:`buffer` context managers are used
        nestedly.

    If the context manager is left without exception, the buffered output
    is sent to the actual sink. Otherwise, it is discarded. The entire
    state of the XML generator is also captured and restored to the
    previous state if the context manager is left with an exception,
    allowing fail-safe serialisation attempts.

    :meth:`flush` is not called automatically; while a buffer is active,
    flushing is disabled (``self._flush`` is set to None).
    """
    if self._buf_in_use:
        raise RuntimeError("nested use of buffer() is not supported")
    self._buf_in_use = True
    old_write = self._write
    old_flush = self._flush
    if self._buf is None:
        self._buf = io.BytesIO()
    else:
        try:
            # Reuse the existing buffer by rewinding and clearing it.
            self._buf.seek(0)
            self._buf.truncate()
        except BufferError:
            # Someone still holds a view of the old buffer; start fresh.
            self._buf = io.BytesIO()
    self._write = self._buf.write
    self._flush = None
    try:
        with self._save_state():
            yield
        # Left without exception: forward buffered bytes to the real sink.
        old_write(self._buf.getbuffer())
        if old_flush:
            old_flush()
    finally:
        # Always restore the original write/flush hooks.
        self._buf_in_use = False
        self._write = old_write
        self._flush = old_flush
Context manager to temporarily buffer the output. :raise RuntimeError: If two :meth:`buffer` context managers are used nestedly. If the context manager is left without exception, the buffered output is sent to the actual sink. Otherwise, it is discarded. In addition to the output being buffered, buffer also captures the entire state of the XML generator and restores it to the previous state if the context manager is left with an exception. This can be used to fail-safely attempt to serialise a subtree and return to a well-defined state if serialisation fails. :meth:`flush` is not called automatically. If :meth:`flush` is called while a :meth:`buffer` context manager is active, no actual flushing happens (but unfinished opening tags are closed as usual, see the `short_empty_arguments` parameter).
def to_internal_value(self, data):
    """Resolve an author dict; it must include either an id or a username."""
    model = self.Meta.model
    if "id" in data:
        return model.objects.get(id=data["id"])
    if "username" not in data:
        raise ValidationError("Authors must include an ID or a username.")
    return model.objects.get(username=data["username"])
Basically, each author dict must include either a username or id.
def nodes(self, frequency=None):
    """Return all nodes of the specified frequency related to this Session.

    Parameters
    ----------
    frequency : str | None
        The frequency of the nodes to return

    Returns
    -------
    nodes : list[TreeNode]
        All nodes related to the Session for the specified frequency
        (an empty list when *frequency* is None).
    """
    if frequency is None:
        # Bug fix: previously a bare `[]` expression fell through here,
        # so the function returned None instead of an empty list.
        return []
    elif frequency == 'per_session':
        return [self]
    elif frequency in ('per_visit', 'per_subject'):
        return [self.parent]
    elif frequency == 'per_study':
        return [self.parent.parent]
Returns all nodes of the specified frequency that are related to the given Session Parameters ---------- frequency : str | None The frequency of the nodes to return Returns ------- nodes : iterable[TreeNode] All nodes related to the Session for the specified frequency
def start_virtual_display(self, width=1440, height=900, colordepth=24, **kwargs):
    """Start a virtual X display (stopped automatically at interpreter exit).

    Does nothing if this instance already started a display.

    *Arguments:*
    - width: width in pixels
    - height: height in pixels
    - colordepth: color depth to use
    - kwargs: extra parameters passed to Xvfb
    """
    if self._display is not None:
        return
    logger.info("Using virtual display: '{0}x{1}x{2}'".format(
        width, height, colordepth))
    self._display = Xvfb(int(width), int(height), int(colordepth), **kwargs)
    self._display.start()
    atexit.register(self._display.stop)
Starts virtual display which will be destroyed after test execution will be end *Arguments:* - width: a width to be set in pixels - height: a height to be set in pixels - color_depth: a color depth to be used - kwargs: extra parameters *Example:* | Start Virtual Display | | Start Virtual Display | 1920 | 1080 | | Start Virtual Display | ${1920} | ${1080} | ${16} |
def get_rows(self, infer_nrows, skiprows=None):
    """Read up to *infer_nrows* non-skipped rows from self.f.

    Every consumed row (including skipped ones) is retained in
    ``self.buffer`` so downstream readers still see it; only the
    non-skipped rows are returned for column-spec detection.

    Parameters
    ----------
    infer_nrows : int
        Number of rows to read from self.f, not counting skipped rows.
    skiprows : set, optional
        Indices of rows to skip.

    Returns
    -------
    detect_rows : list of str
        The rows to use for detection.
    """
    skip = skiprows if skiprows is not None else set()
    consumed = []
    detected = []
    for index, row in enumerate(self.f):
        if index not in skip:
            detected.append(row)
        consumed.append(row)
        if len(detected) >= infer_nrows:
            break
    self.buffer = iter(consumed)
    return detected
Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped some rows here as well. Parameters ---------- infer_nrows : int Number of rows to read from self.f, not counting rows that are skipped. skiprows: set, optional Indices of rows to skip. Returns ------- detect_rows : list of str A list containing the rows to read.
def resources_after_reservation(res, constraint):
    """Return a copy of *res* with the constraint's reservation subtracted.

    Only the magnitude (stop - start) of the reserved region matters, not
    its position. The caller is responsible for checking that the
    constraint applies to the core whose resources are being constrained.
    """
    remaining = res.copy()
    reservation = constraint.reservation
    remaining[constraint.resource] -= reservation.stop - reservation.start
    return remaining
Return the resources available after a specified ReserveResourceConstraint has been applied. Note: the caller is responsible for testing that the constraint is applicable to the core whose resources are being constrained. Note: this function does not pay attention to the specific position of the reserved regieon, only its magnitude.
def slot(self, slot_index, marshal=None, unmarshal=None, build=None, cast=None, compress=False):
    """Decorator defining a slot in a LMDB database schema.

    Apply to instances of classes derived from :class:`zlmdb.PersistentMap`;
    records the slot configuration on the object and registers it under
    both its numeric index and its class name.

    :param slot_index: numeric index of the slot (must be unused)
    :param marshal: optional marshal function
    :param unmarshal: optional unmarshal function
    :param build: optional build function
    :param cast: optional cast function
    :param compress: whether to compress values
    :return: the decorator
    """
    def decorate(obj):
        assert isinstance(obj, PersistentMap)
        cls_name = obj.__class__.__name__
        # Both the index and the name must be unique within the schema.
        assert slot_index not in self._index_to_slot
        assert cls_name not in self._name_to_slot
        obj._zlmdb_slot = slot_index
        obj._zlmdb_marshal = marshal
        obj._zlmdb_unmarshal = unmarshal
        obj._zlmdb_build = build
        obj._zlmdb_cast = cast
        obj._zlmdb_compress = compress
        entry = Slot(slot_index, cls_name, obj)
        self._index_to_slot[slot_index] = entry
        self._name_to_slot[cls_name] = entry
        return obj
    return decorate
Decorator for use on classes derived from zlmdb.PersistentMap. The decorator define slots in a LMDB database schema based on persistent maps, and slot configuration. :param slot_index: :param marshal: :param unmarshal: :param build: :param cast: :param compress: :return:
def upgrade_defaults(self):
    """Upgrade the config file on disk, then reload it."""
    defaults = self.defaults
    defaults.upgrade()
    self.reset_defaults(defaults.filename)
Upgrade config file and reload.
def set_eep(self, data):
    """Update packet bit data and status based on the EEP profile.

    Input *data* is a dictionary with keys corresponding to the EEP.
    """
    updated = self.eep.set_values(self._profile, self._bit_data,
                                  self._bit_status, data)
    self._bit_data, self._bit_status = updated
Update packet data based on EEP. Input data is a dictionary with keys corresponding to the EEP.
def blue_hour(self, direction=SUN_RISING, date=None, local=True, use_elevation=True):
    """Return the start and end times of the Blue Hour.

    Uses the PhotoPills definition: the sun is between 6 and 4 degrees
    below the horizon.

    :param direction: ``astral.SUN_RISING`` or ``astral.SUN_SETTING``.
    :param date: date to calculate for; defaults to today.
    :param local: True = return times in the location's time zone
        (requires a timezone to be set); False = return times in UTC.
    :param use_elevation: whether to account for the location's elevation.
    :return: tuple of (start, end) :class:`~datetime.datetime` objects.
    :raises ValueError: if *local* is requested but no timezone is set.
    """
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")

    if self.astral is None:
        self.astral = Astral()

    when = date if date is not None else datetime.date.today()
    elevation = self.elevation if use_elevation else 0
    start, end = self.astral.blue_hour_utc(direction, when, self.latitude,
                                           self.longitude, elevation)
    if local:
        return start.astimezone(self.tz), end.astimezone(self.tz)
    return start, end
Returns the start and end times of the Blue Hour when the sun is traversing in the specified direction. This method uses the definition from PhotoPills i.e. the blue hour is when the sun is between 6 and 4 degrees below the horizon. :param direction: Determines whether the time is for the sun rising or setting. Use ``astral.SUN_RISING`` or ``astral.SUN_SETTING``. Default is rising. :type direction: int :param date: The date for which to calculate the times. If no date is specified then the current date will be used. :type date: :class:`~datetime.date` :param local: True = Times to be returned in location's time zone; False = Times to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :return: A tuple of the date and time at which the Blue Hour starts and ends. :rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)
def value(self):
    """Return this response as a ``{'code': ..., 'errors': ...}`` dict.

    ``Status`` enum codes are unwrapped to their raw value.
    """
    code = self.code.value if isinstance(self.code, Status) else self.code
    return {'code': code, 'errors': self.errors}
Utility method to retrieve Response Object information
def contains_version(self, version):
    """Return True if *version* is contained in this range."""
    if len(self.bounds) >= 5:
        # Many bounds: delegate to the bisection-based helper.
        _, contains = self._contains_version(version)
        return contains
    # Few bounds: a linear scan is cheaper.
    for bound in self.bounds:
        containment = bound.version_containment(version)
        if containment == 0:
            return True
        if containment == -1:
            return False
    return False
Returns True if version is contained in this range.
def _run_cmd(cmds): if not isinstance(cmds, str): cmds = "".join(cmds) print("Execute \"%s\"" % cmds) try: subprocess.check_call(cmds, shell=True) except subprocess.CalledProcessError as err: print(err) raise err
Run commands, raise exception if failed
def _checkMissingParamsFromWorkitem(self, copied_from, keep=False, **kwargs): parameters = self.listFieldsFromWorkitem(copied_from, keep=keep) self._findMissingParams(parameters, **kwargs)
Check the missing parameters for rendering directly from the copied workitem
def get_courses_metadata(self):
    """Get the metadata for the courses.

    return: (osid.Metadata) - metadata for the courses
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = dict(self._mdata['courses'],
                    existing_courses_values=self._my_map['courseIds'])
    return Metadata(**metadata)
Gets the metadata for the courses. return: (osid.Metadata) - metadata for the courses *compliance: mandatory -- This method must be implemented.*
def portals(self):
    """Return the Portals class that provides administration access
    into a given organization."""
    return _portals.Portals(url="%s/portals" % self.root,
                            securityHandler=self._securityHandler,
                            proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port)
returns the Portals class that provides administration access into a given organization
def get_method_docstring(cls, method_name):
    """Return the docstring of ``cls.<method_name>``.

    If the method exists but has no docstring of its own, the base classes
    are searched (in ``__bases__`` order) and the first inherited docstring
    found is returned.

    :param cls: class to inspect
    :type cls: type
    :param method_name: name of the method
    :type method_name: str
    :return: the docstring, or None if the method is missing or undocumented
    :rtype: str | None
    """
    method = getattr(cls, method_name, None)
    if method is None:
        return None
    docstring = inspect.getdoc(method)
    if docstring is not None:
        return docstring
    # Bug fix: the previous implementation returned None as soon as the
    # FIRST base class yielded no docstring (an `else: return None` inside
    # the loop), never checking the remaining bases. Also fixes the
    # `docstrign` typo.
    for base in cls.__bases__:
        docstring = get_method_docstring(base, method_name)
        if docstring:
            return docstring
    return None
Return the docstring of the given method. If the method's own docstring is empty, the docstring is looked up on the parent classes. :param cls: class to inspect :param method_name: name of the method :return: the docstring, or None :rtype: str | None
def save_inventory(inventory, hosts_file=HOSTS_FILE):
    """Save an Ansible inventory to file.

    Parameters
    ----------
    inventory : ConfigParser.SafeConfigParser
        Content of the hosts file.
    hosts_file : str, optional
        Path to the Ansible hosts file.
    """
    with open(hosts_file, 'w') as hosts:
        inventory.write(hosts)
Saves Ansible inventory to file. Parameters ---------- inventory: ConfigParser.SafeConfigParser content of the `hosts_file` hosts_file: str, optional path to Ansible hosts file
def get_collated_content(ident_hash, context_ident_hash, cursor):
    """Return collated content for ``ident_hash`` (None when not found)."""
    cursor.execute(SQL['get-collated-content'],
                   (ident_hash, context_ident_hash,))
    row = cursor.fetchone()
    try:
        return row[0]
    except TypeError:
        # fetchone() returned None: no collated content exists.
        return
Return collated content for ``ident_hash``.
def clean_all(self, config_file, region=None, profile_name=None):
    """Clean all provisioned artifacts from both the local file and the
    AWS Greengrass service.

    :param config_file: config file containing the group to clean
    :param region: region in which the group should be cleaned
        [default: the instance's region]
    :param profile_name: name of the `awscli` profile to use [default: None]
    :raises ValueError: if the config is already clean
    """
    logging.info('[begin] Cleaning all provisioned artifacts')
    if GroupConfigFile(config_file=config_file).is_fresh() is True:
        raise ValueError("Config is already clean.")
    if region is None:
        region = self._region
    self._delete_group(config_file, region=region, profile_name=profile_name)
    self.clean_core(config_file, region=region)
    self.clean_devices(config_file, region=region)
    self.clean_file(config_file)
    logging.info('[end] Cleaned all provisioned artifacts')
Clean all provisioned artifacts from both the local file and the AWS Greengrass service. :param config_file: config file containing the group to clean :param region: the region in which the group should be cleaned. [default: us-west-2] :param profile_name: the name of the `awscli` profile to use. [default: None]
def websafe(s):
    """Return a string with HTML-safe text.

    ``&``, ``<`` and ``>`` are escaped (``&`` first, so the entities
    introduced for ``<``/``>`` are not double-escaped), literal ``\\x``
    sequences are set off with a leading space, and newlines become
    ``<br>`` tags.
    """
    # Bug fix: '&' was previously not escaped at all, producing invalid
    # HTML (and allowing entity passthrough) for inputs containing '&'.
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;").replace(">", "&gt;")
    s = s.replace(r'\x', r' \x')
    s = s.replace("\n", "<br>")
    return s
return a string with HTML-safe text
def _scons_user_warning(e):
    """Handle user warnings.

    Print the warning message plus the file, line number and routine of
    the deepest stack frame that is not part of SCons itself.
    """
    _, _, tb = sys.exc_info()
    frame = find_deepest_user_frame(traceback.extract_tb(tb))
    filename, lineno, routine = frame[0], frame[1], frame[2]
    sys.stderr.write("\nscons: warning: %s\n" % e)
    sys.stderr.write('File "%s", line %d, in %s\n' % (filename, lineno, routine))
Handle user warnings. Print out a message and a description of the warning, along with the line number and routine where it occured. The file and line number will be the deepest stack frame that is not part of SCons itself.
def validate_hier_intervals(intervals_hier):
    """Validate a hierarchical segment annotation.

    Parameters
    ----------
    intervals_hier : ordered list of segmentations

    Raises
    ------
    ValueError
        If any segmentation does not span the full duration of the
        top-level segmentation, or does not start at 0.
    """
    # Every lower level must structurally agree with the top level.
    top_labels = util.generate_labels(intervals_hier[0])
    boundaries = set(util.intervals_to_boundaries(intervals_hier[0]))

    for level, intervals in enumerate(intervals_hier[1:], 1):
        validate_structure(intervals_hier[0], top_labels,
                           intervals, util.generate_labels(intervals))
        level_bounds = set(util.intervals_to_boundaries(intervals))
        # Boundaries may be refined going down the hierarchy, but dropping
        # a boundary present at an upper level is inconsistent.
        if boundaries - level_bounds:
            warnings.warn('Segment hierarchy is inconsistent '
                          'at level {:d}'.format(level))
        boundaries |= level_bounds
Validate a hierarchical segment annotation. Parameters ---------- intervals_hier : ordered list of segmentations Raises ------ ValueError If any segmentation does not span the full duration of the top-level segmentation. If any segmentation does not start at 0.
def _expand_parameters(specification, parameters, original=None): expanded_specification = deepcopy(specification) try: for step_num, step in enumerate(expanded_specification['steps']): current_step = expanded_specification['steps'][step_num] for command_num, command in enumerate(step['commands']): current_step['commands'][command_num] = \ Template(command).substitute(parameters) if original: return specification else: return expanded_specification except KeyError as e: raise ValidationError('Workflow parameter(s) could not ' 'be expanded. Please take a look ' 'to {params}'.format(params=str(e)))
Expand parameters inside commands for Serial workflow specifications. :param specification: Full valid Serial workflow specification. :param parameters: Parameters to be expanded in a Serial specification. :param original: Flag which determines the type of specification to return. :returns: If the 'original' parameter is set, a copy of the specification without expanded parameters will be returned. If 'original' is not set, a copy of the specification with expanded parameters (all $varname and ${varname} will be expanded with their values). Otherwise an error will be thrown if the parameters cannot be expanded. :raises: jsonschema.ValidationError
def to_binary(self, threshold=0.0):
    """Create a BinaryImage from this depth image.

    Pixels with depth strictly greater than *threshold* become
    BINARY_IM_MAX_VAL; all other pixels become zero.

    Parameters
    ----------
    threshold : float
        The depth threshold.

    Returns
    -------
    :obj:`BinaryImage`
    """
    binary_data = (BINARY_IM_MAX_VAL * (self._data > threshold)).astype(np.uint8)
    return BinaryImage(binary_data, self._frame)
Creates a BinaryImage from the depth image. Points where the depth is greater than threshold are converted to ones, and all other points are zeros. Parameters ---------- threshold : float The depth threshold. Returns ------- :obj:`BinaryImage` A BinaryImage where all 1 points had a depth greater than threshold in the DepthImage.
def plot_xtb(fignum, XTB, Bs, e, f):
    """Plot a series of susceptibility measurements versus temperature,
    one curve per field strength, holding frequency constant."""
    plt.figure(num=fignum)
    plt.xlabel('Temperature (K)')
    plt.ylabel('Susceptibility (m^3/kg)')
    for k, field in enumerate(XTB):
        chis = [xt[0] for xt in field]
        temperatures = [xt[1] for xt in field]
        plt.plot(temperatures, chis)
        # Label each curve with its field strength at the last point.
        plt.text(temperatures[-1], chis[-1], '%8.2e' % (Bs[k]) + ' T')
    plt.title(e + ': f = ' + '%i' % (int(f)) + ' Hz')
function to plot series of chi measurements as a function of temperature, holding frequency constant and varying B
def list_account_admins(self, account_id, user_id=None):
    """List the admins in an account.

    Performs ``GET /api/v1/accounts/{account_id}/admins`` and follows
    pagination.

    :param account_id: ID of the account whose admins are listed.
    :param user_id: optional user ID filter added as a query parameter.
    :returns: the paginated API response.
    """
    path = {}
    data = {}
    params = {}

    # Required path segment.
    path["account_id"] = account_id

    # Optional filter; omitted entirely when not given.
    if user_id is not None:
        params["user_id"] = user_id

    self.logger.debug("GET /api/v1/accounts/{account_id}/admins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/admins".format(**path), data=data, params=params, all_pages=True)
List account admins. List the admins in the account
def lstm_init_states(batch_size):
    """Return names and zero-filled arrays for LSTM initial states.

    :param batch_size: batch size used to shape the state arrays.
    :returns: tuple of (list of state names, list of ``mx.nd`` zero
        arrays), parallel to each other.
    """
    hp = Hyperparams()
    # Shapes come from the model's layer count and hidden size.
    init_shapes = lstm.init_states(batch_size=batch_size,
                                   num_lstm_layer=hp.num_lstm_layer,
                                   num_hidden=hp.num_hidden)
    init_names = [s[0] for s in init_shapes]
    init_arrays = [mx.nd.zeros(x[1]) for x in init_shapes]
    return init_names, init_arrays
Returns a tuple of names and zero arrays for LSTM init states
def index_library_datasets(self, tick_f=None):
    """Index all datasets of the library, and the partitions of each
    newly indexed dataset.

    Args:
        tick_f (callable, optional): callable of one argument; receives
            a progress string of the form
            ``'datasets: N partitions: M'``.
    """
    dataset_n = 0
    partition_n = 0

    def tick(d, p):
        # Report progress only when a callback was supplied.
        if tick_f:
            tick_f('datasets: {} partitions: {}'.format(d, p))

    for dataset in self.library.datasets:
        # Partitions are only indexed for datasets that index_one
        # reports as newly indexed; the dead `else: pass` branch of the
        # original was removed.
        if self.backend.dataset_index.index_one(dataset):
            dataset_n += 1
            tick(dataset_n, partition_n)

            for partition in dataset.partitions:
                self.backend.partition_index.index_one(partition)
                partition_n += 1
                tick(dataset_n, partition_n)
Indexes all datasets of the library. Args: tick_f (callable, optional): callable of one argument. Gets string with index state.
def make_signed_token(self, key):
    """Sign the payload, producing a JWS token.

    The instance's header becomes the JWS protected header and its
    claims become the payload; the resulting JWS object is stored in
    ``self.token``.

    :param key: a ``jwcrypto.jwk.JWK`` signing key.
    """
    t = JWS(self.claims)
    t.add_signature(key, protected=self.header)
    self.token = t
Signs the payload.

Creates a JWS token with the header as the JWS protected header and
the claims as the payload. See (:class:`jwcrypto.jws.JWS`) for details
on the exceptions that may be raised.

:param key: A (:class:`jwcrypto.jwk.JWK`) key.
def valid_api_plugin(self, plugin):
    """Validate an API plugin, ensuring it is an API plugin and has the
    necessary fields present.

    `plugin` is a subclass of scruffy's Plugin class.

    Returns True when `plugin` subclasses ``APIPlugin``, declares
    ``plugin_type == 'api'`` and provides non-None ``request``,
    ``request_class`` and ``response_class`` attributes.
    """
    # getattr(..., None) collapses each hasattr + value check of the
    # original into one lookup; `is not None` replaces `!= None`.
    return (issubclass(plugin, APIPlugin) and
            getattr(plugin, 'plugin_type', None) == 'api' and
            getattr(plugin, 'request', None) is not None and
            getattr(plugin, 'request_class', None) is not None and
            getattr(plugin, 'response_class', None) is not None)
Validate an API plugin, ensuring it is an API plugin and has the necessary fields present. `plugin` is a subclass of scruffy's Plugin class.
def compile_bundle_entry(self, spec, entry):
    """Copy one bundle entry's source into the build directory.

    :param spec: build spec; ``spec[BUILD_DIR]`` is the destination root.
    :param entry: tuple of (modname, source, target, modpath).
    :returns: tuple (bundled_modpath, bundled_target,
        export_module_name); the last contains ``modname`` only when the
        source was a regular file.
    """
    modname, source, target, modpath = entry
    bundled_modpath = {modname: modpath}
    bundled_target = {modname: target}
    export_module_name = []
    if isfile(source):
        export_module_name.append(modname)
        copy_target = join(spec[BUILD_DIR], target)
        # Ensure the target's parent directory exists before copying.
        if not exists(dirname(copy_target)):
            makedirs(dirname(copy_target))
        shutil.copy(source, copy_target)
    elif isdir(source):
        # Directories are copied wholesale under the module name.
        copy_target = join(spec[BUILD_DIR], modname)
        shutil.copytree(source, copy_target)
    return bundled_modpath, bundled_target, export_module_name
Handler for each entry for the bundle method of the compile process. This copies the source file or directory into the build directory.
def register_class(self, instance, name=None):
    """Register all public members of *instance* as RPC services.

    Every attribute whose name does not start with an underscore is
    registered under "<prefix>.<attribute>", where the prefix is *name*
    when given, otherwise the instance's class name.
    """
    prefix = name or instance.__class__.__name__
    for attr_name in dir(instance):
        if attr_name.startswith("_"):
            continue
        self.register_function(
            getattr(instance, attr_name),
            name="%s.%s" % (prefix, attr_name)
        )
Add all functions of a class-instance to the RPC-services.

All entries of the instance which do not begin with '_' are added.

:Parameters:
    - instance: class-instance containing the functions
    - name: | hierarchical prefix.
            | If omitted, the functions are added directly.
            | If given, the functions are added as "name.function".

:TODO:
    - only add functions and omit attributes?
    - improve hierarchy?
def create_from_pybankid_exception(cls, exception):
    """Create a wrapped error from a `PyBankID` exception.

    :param bankid.exceptions.BankIDError exception: the source error.
    :return: the wrapped exception.
    :rtype: :py:class:`~FlaskPyBankIDError`
    """
    message = "{0}: {1}".format(exception.__class__.__name__, str(exception))
    status_code = _exception_class_to_status_code.get(exception.__class__)
    return cls(message, status_code)
Class method for initiating from a `PyBankID` exception. :param bankid.exceptions.BankIDError exception: :return: The wrapped exception. :rtype: :py:class:`~FlaskPyBankIDError`
def loads(s, single=False):
    """Deserialize MRX string representations.

    Args:
        s (str): a MRX string
        single (bool): if `True`, only return the first Xmrs object
    Returns:
        a generator of Xmrs objects (unless *single* is `True`)
    """
    corpus = etree.fromstring(s)
    if single:
        return _deserialize_mrs(next(corpus))
    return (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)
Deserialize MRX string representations Args: s (str): a MRX string single (bool): if `True`, only return the first Xmrs object Returns: a generator of Xmrs objects (unless *single* is `True`)
def wait_for(func, timeout=10, step=1, default=None, func_args=(), func_kwargs=None):
    """Call `func` at regular intervals until it returns a truthy value
    within `timeout` seconds, and return that value.

    @param func: callable to poll
    @param timeout: total time budget in seconds
    @param step: interval between calls (scaled by BLUR_FACTOR)
    @param default: value returned when `func` never returns truthy
    @param func_args: *args for `func`
    @param func_kwargs: **kwargs for `func`
    @return: `default` or the first truthy result of `func`
    """
    if func_kwargs is None:
        func_kwargs = dict()
    max_time = time.time() + timeout
    # Never poll with a step larger than the timeout itself.
    step = min(step or 1, timeout) * BLUR_FACTOR

    ret = default
    while time.time() <= max_time:
        call_ret = func(*func_args, **func_kwargs)
        if call_ret:
            ret = call_ret
            break
        else:
            time.sleep(step)
            # Shrink the step so we never sleep past the deadline; the
            # max(..., 0) clamp guards sleep() against a negative value.
            step = max(min(step, max_time - time.time()), 0) * BLUR_FACTOR

    if time.time() > max_time:
        # Fixed typo in the log message: "exectute" -> "execute".
        log.warning("Exceeded waiting time (%s seconds) to execute %s",
                    timeout, func)
    return ret
Call `func` at regular intervals and Waits until the given function returns a truthy value within the given timeout and returns that value. @param func: @type func: function @param timeout: @type timeout: int | float @param step: Interval at which we should check for the value @type step: int | float @param default: Value that should be returned should `func` not return a truthy value @type default: @param func_args: *args for `func` @type func_args: list | tuple @param func_kwargs: **kwargs for `func` @type func_kwargs: dict @return: `default` or result of `func`
def _match_tags(repex_tags, path_tags): if 'any' in repex_tags or (not repex_tags and not path_tags): return True elif set(repex_tags) & set(path_tags): return True return False
Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match.
def insert_data(self, node, data, start, end):
    """Insert every item of *data* into the (initially empty) tree.

    Each item contributes its first two fields as the interval and its
    last field as the payload.
    """
    for item in data:
        interval = [item[0], item[1]]
        self.recursive_insert(node, interval, item[-1], start, end)
loops through all the data and inserts them into the empty tree
def benchmark(self, func, gpu_args, instance, times, verbose):
    """Benchmark one kernel instance and return its measured time.

    :param func: compiled kernel function.
    :param gpu_args: kernel arguments already prepared for the device.
    :param instance: kernel instance with name/threads/grid attributes.
    :param times: passed through to the device benchmark routine.
    :param verbose: when True, print a note for skipped configurations.
    :returns: the measured time, or None when the configuration could
        not be launched due to resource limits.
    :raises Exception: re-raises any non-resource-related failure.
    """
    logging.debug('benchmark ' + instance.name)
    logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads)
    logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid)
    time = None
    try:
        time = self.dev.benchmark(func, gpu_args, instance.threads,
                                  instance.grid, times)
    except Exception as e:
        # Some configurations are simply invalid for the device (too
        # many resources requested); those are skipped, not fatal.
        skippable_exceptions = ["too many resources requested for launch",
                                "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"]
        if any([skip_str in str(e) for skip_str in skippable_exceptions]):
            logging.debug('benchmark fails due to runtime failure too many resources required')
            if verbose:
                print("skipping config", instance.name,
                      "reason: too many resources requested for launch")
        else:
            # Anything else is a genuine error and propagates.
            logging.debug('benchmark encountered runtime failure: ' + str(e))
            print("Error while benchmarking:", instance.name)
            raise e
    return time
benchmark the kernel instance
def create(self, credentials, friendly_name=values.unset, account_sid=values.unset):
    """Create a new AwsInstance credential resource.

    :param unicode credentials: AWS access credentials in the format
        <AWS_ACCESS_KEY_ID>:<AWS_SECRET_ACCESS_KEY>.
    :param unicode friendly_name: a string to describe the resource.
    :param unicode account_sid: the Subaccount this credential should
        be associated with.
    :returns: the newly created AwsInstance.
    :rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
    """
    # values.of() drops unset parameters from the request payload.
    data = values.of({
        'Credentials': credentials,
        'FriendlyName': friendly_name,
        'AccountSid': account_sid,
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return AwsInstance(self._version, payload, )
Create a new AwsInstance :param unicode credentials: A string that contains the AWS access credentials in the format <AWS_ACCESS_KEY_ID>:<AWS_SECRET_ACCESS_KEY> :param unicode friendly_name: A string to describe the resource :param unicode account_sid: The Subaccount this Credential should be associated with. :returns: Newly created AwsInstance :rtype: twilio.rest.accounts.v1.credential.aws.AwsInstance
def edterm(trmtyp, source, target, et, fixref, abcorr, obsrvr, npts):
    """Compute a set of points on the umbral or penumbral terminator of
    an ellipsoidal target body (wrapper over CSPICE ``edterm_c``).

    :param trmtyp: terminator type.
    :param source: light source body name.
    :param target: target body name.
    :param et: observation epoch (float, ephemeris seconds past J2000).
    :param fixref: body-fixed frame associated with the target.
    :param abcorr: aberration correction.
    :param obsrvr: observer name.
    :param npts: number of terminator points to compute.
    :returns: tuple of (epoch associated with target center, observer
        position in the body-fixed frame, terminator point set).
    """
    # Convert inputs to the C types expected by CSPICE.
    trmtyp = stypes.stringToCharP(trmtyp)
    source = stypes.stringToCharP(source)
    target = stypes.stringToCharP(target)
    et = ctypes.c_double(et)
    fixref = stypes.stringToCharP(fixref)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    # Output buffers; trmpts must be sized while npts is still an int.
    trgepc = ctypes.c_double()
    obspos = stypes.emptyDoubleVector(3)
    trmpts = stypes.emptyDoubleMatrix(x=3, y=npts)
    npts = ctypes.c_int(npts)
    libspice.edterm_c(trmtyp, source, target, et, fixref, abcorr, obsrvr,
                      npts, ctypes.byref(trgepc), obspos, trmpts)
    return trgepc.value, stypes.cVectorToPython(obspos), stypes.cMatrixToNumpy(
        trmpts)
Compute a set of points on the umbral or penumbral terminator of
a specified target body, where the target shape is modeled as an
ellipsoid.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/edterm_c.html

:param trmtyp: Terminator type.
:type trmtyp: str
:param source: Light source.
:type source: str
:param target: Target body.
:type target: str
:param et: Observation epoch.
:type et: float
:param fixref: Body-fixed frame associated with target.
:type fixref: str
:param abcorr: Aberration correction.
:type abcorr: str
:param obsrvr: Observer.
:type obsrvr: str
:param npts: Number of points in terminator set.
:type npts: int
:return: Epoch associated with target center,
        Position of observer in body-fixed frame,
        Terminator point set.
:rtype: tuple
def timeout(seconds):
    """Decorator factory: raise TimeoutError if the wrapped function
    does not return within *seconds* seconds.

    Uses SIGALRM, so it only works on Unix and in the main thread.

    :param seconds: whole-second time budget for each call.
    """
    # Handler parameter renamed from `signal` to `signum` so it no
    # longer shadows the `signal` module.
    def _timeout_error(signum, frame):
        # Single-line literal: the original's backslash continuation
        # embedded a run of whitespace inside the message text.
        raise TimeoutError(
            "Operation did not finish within {} seconds".format(seconds))

    def timeout_decorator(func):
        @wraps(func)
        def timeout_wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, _timeout_error)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even on exception.
                signal.alarm(0)
        return timeout_wrapper
    return timeout_decorator
Raises a TimeoutError if a function does not terminate within specified seconds.
def synchronize_resources(self):
    """Synchronize this worker's resources with CVX.

    All reads and writes happen while the sync lock is held, so a stale
    read can never be the last write. Deletions run in reverse sync
    order and creations in sync order so that inter-resource
    dependencies are respected (see the module docstring for the full
    rationale).
    """
    # Bail out (and retry later) if another worker holds the sync lock.
    if not self._rpc.sync_start():
        LOG.info("%(pid)s Failed to grab the sync lock",
                 {'pid': os.getpid()})
        greenthread.sleep(1)
        return
    # Refresh pending resources from neutron while the lock is held.
    for resource in self._resources_to_update:
        self.update_neutron_resource(resource)
    self._resources_to_update = list()
    # Delete in reverse order, create in forward order.
    for resource_type in reversed(self.sync_order):
        resource_type.delete_cvx_resources()
    for resource_type in self.sync_order:
        resource_type.create_cvx_resources()
    self._rpc.sync_end()
    if self._synchronizing_uuid:
        LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete",
                 {'uuid': self._synchronizing_uuid,
                  'pid': os.getpid()})
        # Only record the CVX uuid once the full sync has completed.
        self._cvx_uuid = self._synchronizing_uuid
        self._synchronizing_uuid = None
Synchronize worker with CVX All database queries must occur while the sync lock is held. This tightly couples reads with writes and ensures that an older read does not result in the last write. Eg: Worker 1 reads (P1 created) Worder 2 reads (P1 deleted) Worker 2 writes (Delete P1 from CVX) Worker 1 writes (Create P1 on CVX) By ensuring that all reads occur with the sync lock held, we ensure that Worker 1 completes its writes before Worker2 is allowed to read. A failure to write results in a full resync and purges all reads from memory. It is also important that we compute resources to sync in reverse sync order in order to avoid missing dependencies on creation. Eg: If we query in sync order 1. Query Instances -> I1 isn't there 2. Query Port table -> Port P1 is there, connected to I1 3. We send P1 to CVX without sending I1 -> Error raised But if we query P1 first: 1. Query Ports P1 -> P1 is not there 2. Query Instances -> find I1 3. We create I1, not P1 -> harmless, mech driver creates P1 Missing dependencies on deletion will helpfully result in the dependent resource not being created: 1. Query Ports -> P1 is found 2. Query Instances -> I1 not found 3. Creating P1 fails on CVX
def type(self):
    """MessageHandler type, as a decoded string reported by the
    underlying environment."""
    return ffi.string(lib.EnvGetDefmessageHandlerType(
        self._env, self._cls, self._idx)).decode()
MessageHandler type.
def _cldf2wordlist(dataset, row='parameter_id', col='language_id'):
    """Build a lingpy Wordlist from a CLDF dataset.

    :param dataset: CLDF dataset, converted via ``_cldf2wld``.
    :param row: column used as the wordlist row key.
    :param col: column used as the wordlist column key.
    """
    return lingpy.Wordlist(_cldf2wld(dataset), row=row, col=col)
Read a wordlist object from a CLDF dataset.
def trace(fn):
    """Print the parameters and return value of each call to the
    wrapped function.

    Usage: decorate the appropriate function or method:

        @trace
        def myf(): ...
    """
    # functools.wraps preserves the wrapped function's name, docstring
    # and metadata (the original wrapper lost them).
    @wraps(fn)
    def wrapped(*args, **kwargs):
        msg = []
        msg.append('Enter {}('.format(fn.__name__))
        if args:
            msg.append(', '.join([str(x) for x in args]))
        if kwargs:
            kwargs_str = ', '.join(['{}={}'.format(k, v)
                                    for k, v in kwargs.items()])
            if args:
                msg.append(', ')
            msg.append(kwargs_str)
        msg.append(')')
        print(''.join(msg))
        ret = fn(*args, **kwargs)
        print('Return {}'.format(ret))
        return ret
    return wrapped
Prints parameteters and return values of the each call of the wrapped function. Usage: decorate appropriate function or method: @trace def myf(): ...
def sonos_uri_from_id(self, item_id):
    """Build a ``soco://`` URI that a Sonos device can play.

    Args:
        item_id (str): the unique id of a playable item for this music
            service, e.g. ``spotify:track:2qs5ZcLByNTctJKbhAZ9JE``.

    Returns:
        str: a URI of the form
        ``soco://<quoted item_id>?sid=<service_id>&sn=<serial_number>``.
    """
    # Percent-encode the id so it is safe to embed in the URI.
    item_id = quote_url(item_id.encode('utf-8'))
    account = self.account
    result = "soco://{}?sid={}&sn={}".format(
        item_id, self.service_id, account.serial_number
    )
    return result
Get a uri which can be sent for playing. Args: item_id (str): The unique id of a playable item for this music service, such as that returned in the metadata from `get_metadata`, eg ``spotify:track:2qs5ZcLByNTctJKbhAZ9JE`` Returns: str: A URI of the form: ``soco://spotify%3Atrack %3A2qs5ZcLByNTctJKbhAZ9JE?sid=2311&sn=1`` which encodes the ``item_id``, and relevant data from the account for the music service. This URI can be sent to a Sonos device for playing, and the device itself will retrieve all the necessary metadata such as title, album etc.
def from_fptr(cls, label, type_, fptr):
    """Return an ``FSEntry`` built from an fptr object, copying its
    path, use, file UUID, derivation link and checksum information.

    :param label: label for the new entry.
    :param type_: entry type.
    :param fptr: source object providing path/use/uuid/checksum fields.
    """
    return FSEntry(
        label=label,
        type=type_,
        path=fptr.path,
        use=fptr.use,
        file_uuid=fptr.file_uuid,
        derived_from=fptr.derived_from,
        checksum=fptr.checksum,
        checksumtype=fptr.checksumtype,
    )
Return ``FSEntry`` object.
def select_spread(list_of_elements=None, number_of_elements=None):
    """Return the specified number of elements of a list, spread
    approximately evenly across it."""
    length = len(list_of_elements)
    # Fewer elements than requested: return them all.
    if length <= number_of_elements:
        return list_of_elements
    if number_of_elements == 0:
        return []
    if number_of_elements == 1:
        middle = int(round((length - 1) / 2))
        return [list_of_elements[middle]]
    # Pick one element near the start of the first stride, then recurse
    # on the remainder for the rest.
    first = int(round((length - 1) / (2 * number_of_elements)))
    rest_start = int(round((length - 1) / (number_of_elements)))
    return ([list_of_elements[first]] +
            select_spread(list_of_elements[rest_start:],
                          number_of_elements - 1))
This function returns the specified number of elements of a list spread approximately evenly.
def js_on_change(self, event, *callbacks):
    """Attach one or more ``CustomJS`` callbacks to a BokehJS model
    event.

    If ``event`` names a property of this model it is normalized to
    ``"change:<event>"``; duplicate callbacks are not added twice.

    :raises ValueError: when no callbacks are given, or any callback is
        not a CustomJS instance.
    """
    if len(callbacks) == 0:
        raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")

    # Local import avoids a circular dependency at module load time.
    from bokeh.models.callbacks import CustomJS
    if not all(isinstance(x, CustomJS) for x in callbacks):
        raise ValueError("not all callback values are CustomJS instances")

    # Property names get the BokehJS "change:" prefix automatically.
    if event in self.properties():
        event = "change:%s" % event

    if event not in self.js_property_callbacks:
        self.js_property_callbacks[event] = []
    for callback in callbacks:
        # Skip callbacks that are already registered for this event.
        if callback in self.js_property_callbacks[event]:
            continue
        self.js_property_callbacks[event].append(callback)
Attach a ``CustomJS`` callback to an arbitrary BokehJS model event. On the BokehJS side, change events for model properties have the form ``"change:property_name"``. As a convenience, if the event name passed to this method is also the name of a property on the model, then it will be prefixed with ``"change:"`` automatically: .. code:: python # these two are equivalent source.js_on_change('data', callback) source.js_on_change('change:data', callback) However, there are other kinds of events that can be useful to respond to, in addition to property change events. For example to run a callback whenever data is streamed to a ``ColumnDataSource``, use the ``"stream"`` event on the source: .. code:: python source.js_on_change('streaming', callback)
def run_process(cwd, args):
    """Execute an external command and return its captured output.

    Uses ``subprocess.check_output`` with stderr merged into stdout.
    On failure the error is logged and the failed process's output is
    returned instead of raising.

    :param cwd: working directory for the command.
    :param args: argument list for the command.
    :returns: captured output, whether the command succeeded or failed.
    """
    try:
        process = check_output(args, cwd=cwd, stderr=STDOUT)
        return process
    except CalledProcessError as e:
        # Best-effort: log the failure and hand back whatever output
        # the process produced.
        log('Uh oh, the teapot broke again! Error:', e, type(e),
            lvl=verbose, pretty=True)
        log(e.cmd, e.returncode, e.output, lvl=verbose)
        return e.output
Executes an external process via subprocess.Popen
def append(self, other):
    """Append a collection of Index options together.

    Parameters
    ----------
    other : Index or list/tuple of indices

    Returns
    -------
    appended : Index
        A MultiIndex when every appended index is a MultiIndex with at
        least as many levels; otherwise a flat Index of tuples.
    """
    if not isinstance(other, (list, tuple)):
        other = [other]

    if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
           for o in other):
        # Level-wise append preserves the MultiIndex structure.
        arrays = []
        for i in range(self.nlevels):
            label = self._get_level_values(i)
            appended = [o._get_level_values(i) for o in other]
            arrays.append(label.append(appended))
        return MultiIndex.from_arrays(arrays, names=self.names)

    to_concat = (self.values, ) + tuple(k._values for k in other)
    new_tuples = np.concatenate(to_concat)

    # Fall back to a plain Index when the tuples are not homogeneous
    # enough to rebuild a MultiIndex.
    try:
        return MultiIndex.from_tuples(new_tuples, names=self.names)
    except (TypeError, IndexError):
        return Index(new_tuples)
Append a collection of Index options together Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index
def _in_valid_interval(self, parameter, value): if parameter not in self._parameterIntervals: return True interval = self._parameterIntervals[parameter] if interval[2] and interval[3]: return interval[0] <= value <= interval[1] if not interval[2] and interval[3]: return interval[0] < value <= interval[1] if interval[2] and not interval[3]: return interval[0] <= value < interval[1] return interval[0] < value < interval[1]
Returns if the parameter is within its valid interval. :param string parameter: Name of the parameter that has to be checked. :param numeric value: Value of the parameter. :return: Returns :py:const:`True` it the value for the given parameter is valid, :py:const:`False` otherwise. :rtype: boolean
def reset_logformat_timestamped(logger: logging.Logger,
                                extraname: str = "",
                                level: int = logging.INFO) -> None:
    """Apply a simple time-stamped log format to an existing logger and
    set its log level.

    Args:
        logger: logger to modify
        extraname: additional name to append to the logger's name
        level: log level to set
    """
    name_part = extraname + ":" if extraname else ""
    fmt = ("%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s:"
           + name_part
           + "%(message)s")
    reset_logformat(logger, fmt=fmt)
    logger.setLevel(level)
Apply a simple time-stamped log format to an existing logger, and set its loglevel to either ``logging.DEBUG`` or ``logging.INFO``. Args: logger: logger to modify extraname: additional name to append to the logger's name level: log level to set
def prune_by_ngram_size(self, minimum=None, maximum=None):
    """Remove results rows whose n-gram size is outside the range
    specified by `minimum` and `maximum`.

    :param minimum: minimum n-gram size (inclusive)
    :type minimum: `int`
    :param maximum: maximum n-gram size (inclusive)
    :type maximum: `int`
    """
    self._logger.info('Pruning results by n-gram size')
    # Compare against None explicitly so a legitimate bound of 0 is not
    # silently ignored (a bare truthiness test would skip it).
    if minimum is not None:
        self._matches = self._matches[
            self._matches[constants.SIZE_FIELDNAME] >= minimum]
    if maximum is not None:
        self._matches = self._matches[
            self._matches[constants.SIZE_FIELDNAME] <= maximum]
Removes results rows whose n-gram size is outside the range specified by `minimum` and `maximum`. :param minimum: minimum n-gram size :type minimum: `int` :param maximum: maximum n-gram size :type maximum: `int`
def _create_fig(
        *,
        x_sc=bq.LinearScale,
        y_sc=bq.LinearScale,
        x_ax=bq.Axis,
        y_ax=bq.Axis,
        fig=bq.Figure,
        options=None,
        params=None
):
    """Initialize scales and axes for a bqplot figure and return the
    resulting blank figure.

    Each plot component is passed in as a class; plot options go in
    ``options`` and per-component init parameters in ``params`` as
    { plot_component: { trait: value, ... } }. A param value that is a
    function is called with the options dict augmented with all
    previously created plot elements.
    """
    # None sentinels replace the original mutable-default dicts, which
    # would be shared across calls if ever mutated.
    options = {} if options is None else options
    params = _merge_with_defaults({} if params is None else params)

    x_sc = x_sc(**_call_params(params['x_sc'], options))
    y_sc = y_sc(**_call_params(params['y_sc'], options))
    options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})

    x_ax = x_ax(**_call_params(params['x_ax'], options))
    y_ax = y_ax(**_call_params(params['y_ax'], options))
    options = tz.merge(options, {'x_ax': x_ax, 'y_ax': y_ax, 'marks': []})

    fig = fig(**_call_params(params['fig'], options))
    return fig
Initializes scales and axes for a bqplot figure and returns the resulting blank figure. Each plot component is passed in as a class. The plot options should be passed into options. Any additional parameters to initialize plot components are passed into params as a dict of { plot_component: { trait: value, ... } } For example, to change the grid lines of the x-axis: params={ 'x_ax': {'grid_lines' : 'solid'} } If the param value is a function, it will be called with the options dict augmented with all previously created plot elements. This permits dependencies on plot elements: params={ 'x_ax': {'scale': lambda opts: opts['x_sc'] } }
def _read_words(filename):
    """Read whitespace-separated words from a file, replacing each
    newline with the EOS marker token."""
    with tf.gfile.GFile(filename, "r") as f:
        if sys.version_info[0] >= 3:
            return f.read().replace("\n", " %s " % EOS).split()
        else:
            # Python 2: decode bytes to unicode before splitting.
            return f.read().decode("utf-8").replace("\n", " %s " % EOS).split()
Reads words from a file.
def str2tuple(str_in):
    """Extract a tuple from a string.

    Args:
        str_in (string): string that contains a python tuple

    Returns:
        (tuple) or None if no valid tuple was found
    """
    candidate = safe_eval(str_in)
    # Anything that did not evaluate to a tuple is rejected.
    return candidate if isinstance(candidate, tuple) else None
Extracts a tuple from a string.

Args:
    str_in (string) that contains python tuple

Returns:
    (tuple) or None if no valid tuple was found

Raises:
    -
def pull(rebase=True, refspec=None):
    """Pull *refspec* from the remote repository into the local one.

    If refspec is None, the current branch is pulled. The '--rebase'
    option is used unless rebase is set to False.

    :returns: True when the pull brought in changes, False when the
        repository was already up to date.
    """
    # Conditional expression replaces the fragile `cond and a or b`
    # idiom of the original.
    options = '--rebase' if rebase else ''
    output = run('pull %s %s' % (options, refspec or ''))
    return not re.search('up.to.date', output)
Pull refspec from remote repository to local If refspec is left as None, then pull current branch The '--rebase' option is used unless rebase is set to false
def _build_instance_group_args(self, instance_group): params = { 'InstanceCount' : instance_group.num_instances, 'InstanceRole' : instance_group.role, 'InstanceType' : instance_group.type, 'Name' : instance_group.name, 'Market' : instance_group.market } if instance_group.market == 'SPOT': params['BidPrice'] = instance_group.bidprice return params
Takes an InstanceGroup; returns a dict that, when its keys are properly prefixed, can be used for describing InstanceGroups in RunJobFlow or AddInstanceGroups requests.
def getNorthSouthClone(self, i):
    """Return the adjacent clone names from both sides of position *i*
    as a (north, south) pair."""
    return (self.getAdjacentClone(i, south=False),
            self.getAdjacentClone(i))
Returns the adjacent clone name from both sides.
def get_meter(self, site, start, end, point_type='Green_Button_Meter',
              var="meter", agg='MEAN', window='24h', aligned=True,
              return_names=True):
    """Get meter data from MDAL.

    Parameters
    ----------
    site : str
        Building name.
    start : str
        Start date - 'YYYY-MM-DDTHH:MM:SSZ' (converted to UTC).
    end : str
        End date - same format, also converted to UTC.
    point_type : str
        Type of data, e.g. Green_Button_Meter, Building_Electric_Meter.
    var : str
        Variable - "meter", "weather", ...
    agg : str
        Aggregation - MEAN, SUM, RAW, ...
    window : str
        Size of the moving window.
    aligned : bool
        Passed through to the MDAL request.
    return_names : bool
        When True, UUID identifiers in the response are replaced with
        point names.

    Returns
    -------
    The MDAL query response.
    """
    start = self.convert_to_utc(start)
    end = self.convert_to_utc(end)
    request = self.compose_MDAL_dic(point_type=point_type, site=site,
                                    start=start, end=end, var=var,
                                    agg=agg, window=window, aligned=aligned)
    resp = self.m.query(request)
    if return_names:
        resp = self.replace_uuid_w_names(resp)
    return resp
Get meter data from MDAL. Parameters ---------- site : str Building name. start : str Start date - 'YYYY-MM-DDTHH:MM:SSZ' end : str End date - 'YYYY-MM-DDTHH:MM:SSZ' point_type : str Type of data, i.e. Green_Button_Meter, Building_Electric_Meter... var : str Variable - "meter", "weather"... agg : str Aggregation - MEAN, SUM, RAW... window : str Size of the moving window. aligned : bool ??? return_names : bool ??? Returns ------- (df, mapping, context) ???
def sigma_from_site_prop(self):
    """Return the sigma value of the GB, derived from the
    'grain_label' site property.

    Raises RuntimeError when any site lacks a grain label (which
    happens when sites were merged).
    """
    labels = self.site_properties['grain_label']
    if None in labels:
        raise RuntimeError('Site were merged, this property do not work')
    # Sigma is the ratio of total sites to coincident ('incident') sites.
    incident_count = sum(1 for tag in labels if 'incident' in tag)
    return int(round(self.num_sites / incident_count))
This method returns the sigma value of the GB from site properties.
If the GB structure merged some atoms because they were too close to
each other, this property will not work.
def bfill(arr, dim=None, limit=None):
    """Backfill missing values along ``dim``, propagating each value
    backwards at most ``limit`` steps.

    :param arr: array to fill.
    :param dim: dimension along which to backfill.
    :param limit: maximum fill distance; defaults to the full length of
        the axis (unlimited).
    """
    axis = arr.get_axis_num(dim)

    # No explicit limit means values may propagate across the whole axis.
    _limit = limit if limit is not None else arr.shape[axis]

    return apply_ufunc(_bfill, arr,
                       dask='parallelized',
                       keep_attrs=True,
                       output_dtypes=[arr.dtype],
                       kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims)
backfill missing values
def dependencies_of(self, address):
    """Return the dependencies of the Target at ``address``.

    This method asserts that the address given is actually in the
    BuildGraph.

    :API: public
    """
    assert address in self._target_by_address, (
        'Cannot retrieve dependencies of {address} because it is not in the BuildGraph.'
        .format(address=address)
    )
    return self._target_dependencies_by_address[address]
Returns the dependencies of the Target at `address`. This method asserts that the address given is actually in the BuildGraph. :API: public