code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def ReplaceInstanceDisks(r, instance, disks=None, mode=REPLACE_DISK_AUTO, remote_node=None, iallocator=None, dry_run=False):
    """Replace disks on an instance via the Ganeti RAPI.

    :param instance: instance whose disks to replace
    :param disks: indexes of disks to replace
    :param mode: replacement mode (defaults to automatic)
    :param remote_node: new secondary node (for the new-secondary mode)
    :param iallocator: instance allocator plugin (for the auto mode)
    :param dry_run: whether to perform a dry run
    :return: job id
    """
    if mode not in REPLACE_DISK:
        raise GanetiApiError("Invalid mode %r not one of %r" % (mode, REPLACE_DISK))
    query = {"mode": mode, "dry-run": dry_run}
    if disks:
        query["disks"] = ",".join(map(str, disks))
    # Only include the optional parameters that were actually provided.
    query.update({key: val for key, val in
                  (("remote_node", remote_node), ("iallocator", iallocator))
                  if val})
    return r.request("post", "/2/instances/%s/replace-disks" % instance, query=query)
Replaces disks on an instance. @type instance: str @param instance: instance whose disks to replace @type disks: list of ints @param disks: Indexes of disks to replace @type mode: str @param mode: replacement mode to use (defaults to replace_auto) @type remote_node: str or None @param remote_node: new secondary node to use (for use with replace_new_secondary mode) @type iallocator: str or None @param iallocator: instance allocator plugin to use (for use with replace_auto mode) @type dry_run: bool @param dry_run: whether to perform a dry run @rtype: int @return: job id
def show_status(self):
    """Show the current status of competition evaluation.

    Also exports error messages generated by attacks and defenses into
    attack_errors.txt and defense_errors.txt under the results directory.
    """
    work_specs = (
        ('Attack work statistics', self.attack_work, 'attack_errors.txt'),
        ('Defense work statistics', self.defense_work, 'defense_errors.txt'),
    )
    for header, work, error_filename in work_specs:
        print_header(header)
        work.read_all_from_datastore()
        self._show_status_for_work(work)
        self._export_work_errors(
            work, os.path.join(self.results_dir, error_filename))
Shows current status of competition evaluation. Also this method saves error messages generated by attacks and defenses into attack_errors.txt and defense_errors.txt.
def histogram1d(data, bins=None, *args, **kwargs):
    """Facade function to create a one-dimensional histogram using dask.

    Non-dask array-likes are wrapped into a dask array first. Only adaptive
    histograms are supported for dask input.
    """
    import dask
    if not hasattr(data, "dask"):
        chunk_size = int(data.shape[0] / options["chunk_split"])
        data = dask.array.from_array(data, chunks=chunk_size)
    if not kwargs.get("adaptive", True):
        raise RuntimeError("Only adaptive histograms supported for dask (currently).")
    kwargs["adaptive"] = True

    def block_hist(array):
        # Histogram one dask block; partial results are merged by _run_dask.
        return original_h1(array, bins, *args, **kwargs)

    return _run_dask(
        name="dask_adaptive1d",
        data=data,
        compute=kwargs.pop("compute", True),
        method=kwargs.pop("dask_method", "threaded"),
        func=block_hist)
Facade function to create one-dimensional histogram using dask. Parameters ---------- data: dask.DaskArray or array-like See also -------- physt.histogram
def available_state_for_gene(self, gene: Gene, state: State) -> Tuple[State, ...]:
    """Return the states reachable from ``state`` for a particular gene."""
    active_multiplex: Tuple[Multiplex] = gene.active_multiplex(state)
    transition: Transition = self.find_transition(gene, active_multiplex)
    current_level: int = state[gene]
    seen = set()
    reachable: List[State] = []
    for raw_target in transition.states:
        target = self._state_after_transition(current_level, raw_target)
        if target in seen:
            continue
        seen.add(target)
        successor: State = state.copy()
        successor[gene] = target
        reachable.append(successor)
    return tuple(reachable)
Return the state reachable from a given state for a particular gene.
def reset(self):
    """Deactivate all cells by clearing every active cell/segment array."""
    for attr in ("activeCells", "activeDeltaSegments",
                 "activeFeatureLocationSegments"):
        setattr(self, attr, np.empty(0, dtype="uint32"))
Deactivate all cells.
def done(self):
    """Return the names of migrations recorded in the database, in id order."""
    applied = self.model.select().order_by(self.model.id)
    return [migration.name for migration in applied]
Scan migrations in database.
def incrby(self, key, increment):
    """Increment the integer value of ``key`` by ``increment``.

    :raises TypeError: if increment is not int
    """
    if isinstance(increment, int):
        return self.execute(b'INCRBY', key, increment)
    raise TypeError("increment must be of type int")
Increment the integer value of a key by the given amount. :raises TypeError: if increment is not int
def downvote(self):
    """Dislike this item; a downvote replaces a neutral vote or an upvote."""
    vote_url = "{0}/3/gallery/{1}/vote/down".format(self._imgur._base_url, self.id)
    return self._imgur._send_request(vote_url, needs_auth=True, method='POST')
Dislike this. A downvote will replace a neutral vote or an upvote. Downvoting something the authenticated user has already downvoted will set the vote to neutral.
def _step00(self, in_row, tmp_row, out_row):
    """Prune whitespace for all fields in the input row, in place.

    :param dict in_row: The input row.
    :param dict tmp_row: Not used.
    :param dict out_row: Not used.
    """
    for field in in_row:
        in_row[field] = WhitespaceCleaner.clean(in_row[field])
    return None, None
Prunes whitespace for all fields in the input row. :param dict in_row: The input row. :param dict tmp_row: Not used. :param dict out_row: Not used.
def config_program_reqs(cls, programs):
    """Run the program tester and exit if no optimizer can be used."""
    cls._set_program_defaults(programs)
    can_png = cls.optipng or cls.pngout or cls.advpng
    can_jpeg = cls.mozjpeg or cls.jpegrescan or cls.jpegtran
    can_comics = cls.comics
    if not (can_png or can_jpeg or can_comics):
        print("All optimizers are not available or disabled.")
        exit(1)
Run the program tester and determine if we can do anything.
def create_masks(input_dim, hidden_dims, input_order='left-to-right', hidden_order='left-to-right'):
    """Return binary mask matrices respecting autoregressive ordering.

    Hidden-layer masks allow a connection only when the input degree is <=
    the output degree; the final mask uses a strict inequality so no unit
    can see its own input.
    """
    degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
    masks = [
        tf.cast(in_deg[:, np.newaxis] <= out_deg, tf.float32)
        for in_deg, out_deg in zip(degrees[:-1], degrees[1:])
    ]
    masks.append(tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32))
    return masks
Returns a list of binary mask matrices respecting autoregressive ordering. Args: input_dim: Number of inputs. hidden_dims: list with the number of hidden units per layer. It does not include the output layer; those number of units will always be set to input_dim downstream. Each hidden layer must be large enough to carry every required degree (otherwise autoregressivity is not possible). input_order: Order of degrees to the input units: 'random', 'left-to-right', 'right-to-left', or an array of an explicit order. For example, 'left-to-right' builds an autoregressive model p(x) = p(x1) p(x2 | x1) ... p(xD | x<D). hidden_order: Order of degrees to the hidden units: 'random', 'left-to-right'. If 'left-to-right', hidden units are allocated equally (up to a remainder term) to each degree.
def get_index_fields(self):
    """Return the list of fields to use for the index, defaulting to the
    model's primary key when none is configured."""
    configured = self.get_meta_option('index', [])
    if configured:
        return configured
    model = getattr(self.model_serializer_meta, 'model', None)
    if model is not None:
        pk_name = model._meta.pk.name
        if pk_name in self.child.get_fields():
            return [pk_name]
    return []
List of fields to use for index
def _split_dict(dic): keys = sorted(dic.keys()) return keys, [dic[k] for k in keys]
Split dict into sorted keys and values >>> _split_dict({'b': 2, 'a': 1}) (['a', 'b'], [1, 2])
def cd(self, *subpaths):
    """Change the current working directory to the joined ``subpaths``."""
    os.chdir(os.path.join(*subpaths))
Change the current working directory and update all the paths in the workspace. This is useful for commands that have to be run from a certain directory.
def pretty_print(session, feed):
    """Print the dictionary entry of a feed in a nice way."""
    if feed not in session.feeds:
        print("You don't have a feed called {}.".format(feed), file=sys.stderr,
              flush=True)
        return
    print()
    feed_info = os.path.join(session.data_dir, feed)
    entrylinks, linkdates = parse_feed_info(feed_info)
    print(feed)
    print("-" * len(feed))
    print(''.join([" url: ", session.feeds[feed]["url"]]))
    if linkdates != []:
        next_sync = time.strftime("%d %b %Y %H:%M:%S", tuple(max(linkdates)))
        print(''.join([" Next sync will download from: ", next_sync, "."]))
Print the dictionary entry of a feed in a nice way.
def _batch_key(self, query): return ''.join( ['%s%s'%(k,v) for k,v in sorted(query.items())] )
Get a unique id from a query.
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
    """Butterworth bandpass filter ``data`` from ``freqmin`` to ``freqmax``.

    :param data: Data to filter (numpy.ndarray).
    :param freqmin: Pass-band low corner frequency.
    :param freqmax: Pass-band high corner frequency.
    :param df: Sampling rate in Hz.
    :param corners: Filter corners / order.
    :param zerophase: If True, apply the filter forwards and backwards for
        zero phase shift (doubles the effective order).
    :return: Filtered data.
    """
    nyquist = 0.5 * df
    low = freqmin / nyquist
    high = freqmax / nyquist
    if high - 1.0 > -1e-6:
        # Degenerate band: fall back to a high-pass and warn the caller.
        msg = ("Selected high corner frequency ({}) of bandpass is at or "
               "above Nyquist ({}). Applying a high-pass instead.").format(
                   freqmax, nyquist)
        warnings.warn(msg)
        return highpass(data, freq=freqmin, df=df, corners=corners,
                        zerophase=zerophase)
    if low > 1:
        raise ValueError("Selected low corner frequency is above Nyquist.")
    z, p, k = iirfilter(corners, [low, high], btype='band', ftype='butter',
                        output='zpk')
    sos = zpk2sos(z, p, k)
    if not zerophase:
        return sosfilt(sos, data)
    forward = sosfilt(sos, data)
    return sosfilt(sos, forward[::-1])[::-1]
Butterworth-Bandpass Filter. Filter data from ``freqmin`` to ``freqmax`` using ``corners`` corners. The filter uses :func:`scipy.signal.iirfilter` (for design) and :func:`scipy.signal.sosfilt` (for applying the filter). :type data: numpy.ndarray :param data: Data to filter. :param freqmin: Pass band low corner frequency. :param freqmax: Pass band high corner frequency. :param df: Sampling rate in Hz. :param corners: Filter corners / order. :param zerophase: If True, apply filter once forwards and once backwards. This results in twice the filter order but zero phase shift in the resulting filtered trace. :return: Filtered data.
def sync_client(self):
    """Lazily create and return the synchronous OAuth 2.0 Bearer client."""
    if not self._sync_client:
        cfg = self.config.get
        self._sync_client = AlfSyncClient(
            token_endpoint=cfg('OAUTH_TOKEN_ENDPOINT'),
            client_id=cfg('OAUTH_CLIENT_ID'),
            client_secret=cfg('OAUTH_CLIENT_SECRET'),
        )
    return self._sync_client
Synchronous OAuth 2.0 Bearer client
def delete_row(self, keyValue=None, table=None, verbose=None):
    """Delete a row from a table.

    :param keyValue: primary key of the row to delete.
    :param table: table name (or ``SUID:``-prefixed SUID), optional.
    """
    params = set_param(['keyValue', 'table'], [keyValue, table])
    return api(url=self.__url + "/delete row", PARAMS=params,
               method="POST", verbose=verbose)
Deletes a row from a table. Requires the table name or SUID and the row key. :param keyValue (string): Specifies the primary key of a value in the row of a table :param table (string, optional): Specifies a table by table name. If the prefix SUID: is used, the table corresponding to the SUID will be returned.
def BuildServiceStub(self, cls):
    """Construct the stub class.

    Installs a minimal ``__init__`` that records the RPC channel, then
    attaches one generated method per descriptor entry.

    Args:
        cls: The class that will be constructed.
    """
    def _init_stub(stub, rpc_channel):
        stub.rpc_channel = rpc_channel

    self.cls = cls
    cls.__init__ = _init_stub
    for method in self.descriptor.methods:
        setattr(cls, method.name, self._GenerateStubMethod(method))
Constructs the stub class. Args: cls: The class that will be constructed.
def create_all(self, checkfirst: bool = True) -> None:
    """Create the PyBEL cache's database and tables.

    :param checkfirst: Check if the database exists before trying to re-make it
    """
    metadata = self.base.metadata
    metadata.create_all(bind=self.engine, checkfirst=checkfirst)
Create the PyBEL cache's database and tables. :param checkfirst: Check if the database exists before trying to re-make it
def app_evaluation_count(app_id, value=1):
    """Count the evaluations of the given app that have the given value."""
    matches = (TabEvaluation.post_id == app_id) & (TabEvaluation.value == value)
    return TabEvaluation.select().where(matches).count()
Get the evaluation count for the given value.
def get_header(self, header_name):
    """Return the header with that name, creating it if it does not exist."""
    try:
        return self.headers[header_name]
    except KeyError:
        return self.add_header_name(header_name)
Returns a header with that name, creates it if it does not exist.
def _flush_puts(self, items, options):
    """Flush all buffered puts to the datastore in one batch call."""
    batch_config = self._create_config(options)
    datastore.Put(items, config=batch_config)
Flush all puts to datastore.
def specialize(self, start, end):
    """Specialize the curve to the sub-interval ``[start, end]``.

    This is a generalized version of :meth:`subdivide` and can reproduce
    either half of that method's output.

    Args:
        start (float): Start of the interval to specialize to.
        end (float): End of the interval to specialize to.

    Returns:
        Curve: The newly-specialized curve.
    """
    specialized_nodes = _curve_helpers.specialize_curve(self._nodes, start, end)
    return Curve(specialized_nodes, self._degree, _copy=False)
Specialize the curve to a given sub-interval. .. image:: ../../images/curve_specialize.png :align: center .. doctest:: curve-specialize >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve = bezier.Curve(nodes, degree=2) >>> new_curve = curve.specialize(-0.25, 0.75) >>> new_curve.nodes array([[-0.25 , 0.25 , 0.75 ], [-0.625, 0.875, 0.375]]) .. testcleanup:: curve-specialize import make_images make_images.curve_specialize(curve, new_curve) This is generalized version of :meth:`subdivide`, and can even match the output of that method: .. testsetup:: curve-specialize2 import numpy as np import bezier nodes = np.asfortranarray([ [0.0, 0.5, 1.0], [0.0, 1.0, 0.0], ]) curve = bezier.Curve(nodes, degree=2) .. doctest:: curve-specialize2 >>> left, right = curve.subdivide() >>> also_left = curve.specialize(0.0, 0.5) >>> np.all(also_left.nodes == left.nodes) True >>> also_right = curve.specialize(0.5, 1.0) >>> np.all(also_right.nodes == right.nodes) True Args: start (float): The start point of the interval we are specializing to. end (float): The end point of the interval we are specializing to. Returns: Curve: The newly-specialized curve.
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
    """Search TheTVDB for a series by name, IMDB id, or zap2it id.

    :param name: the name of the series to look for
    :param imdb_id: the IMDB id of the series to look for
    :param zap2it_id: the zap2it id of the series to look for
    :return: a python dictionary with either the result of the search or
        an error from TheTVDB.
    """
    # NOTE: locals() must be captured before any other local is bound; it
    # maps this function's argument names (plus ``self``) to their values.
    arguments = locals()
    # Accepted python argument names -> their TheTVDB query-string keys.
    optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
    query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
    raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series', query_string), headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id. :param name: the name of the series to look for :param imdb_id: the IMDB id of the series to look for :param zap2it_id: the zap2it id of the series to look for. :return: a python dictionary with either the result of the search or an error from TheTVDB.
def evaluate(self):
    """Create and return the numexpr condition and filter."""
    def _pruned(op_class, kind):
        # Translate a failed prune into a uniform, descriptive error.
        try:
            return self.terms.prune(op_class)
        except AttributeError:
            template = ("cannot process expression [{expr}], [{slf}] "
                        "is not a valid " + kind)
            raise ValueError(template.format(expr=self.expr, slf=self))

    self.condition = _pruned(ConditionBinOp, "condition")
    self.filter = _pruned(FilterBinOp, "filter")
    return self.condition, self.filter
create and return the numexpr condition and filter
def _is_declaration(self, name, value):
    """Return True if a class attribute looks like a default field-value
    declaration; False for private names, classmethods, and staticmethods."""
    if isinstance(value, (classmethod, staticmethod)):
        return False
    if enums.get_builder_phase(value):
        return True
    return not name.startswith("_")
Determines if a class attribute is a field value declaration. Based on the name and value of the class attribute, return ``True`` if it looks like a declaration of a default field value, ``False`` if it is private (name starts with '_') or a classmethod or staticmethod.
def replace_more_comments(self, limit=32, threshold=1):
    """Update the comment tree by replacing instances of MoreComments.

    :param limit: Maximum number of MoreComments objects to replace, one
        API request each. None for no limit, 0 to make no extra requests.
    :param threshold: Minimum number of child comments a MoreComments
        object must have in order to be replaced.
    :returns: A list of MoreComments objects that were not replaced.
    """
    if self._replaced_more:
        return []
    remaining = limit
    more_comments = self._extract_more_comments(self.comments)
    skipped = []
    while more_comments:
        item = heappop(more_comments)
        if remaining == 0:
            # Request budget exhausted: put the item back and stop.
            heappush(more_comments, item)
            break
        elif len(item.children) == 0 or 0 < item.count < threshold:
            # Too few children to be worth a request; keep it in `skipped`.
            heappush(skipped, item)
            continue
        new_comments = item.comments(update=False)
        # remaining is None when limit=None (unlimited): never decrement.
        if new_comments is not None and remaining is not None:
            remaining -= 1
        elif new_comments is None:
            continue
        # Re-queue nested MoreComments discovered in the fetched batch.
        for more in self._extract_more_comments(new_comments):
            more._update_submission(self)
            heappush(more_comments, more)
        for comment in new_comments:
            self._insert_comment(comment)
    self._replaced_more = True
    return more_comments + skipped
Update the comment tree by replacing instances of MoreComments. :param limit: The maximum number of MoreComments objects to replace. Each replacement requires 1 API request. Set to None to have no limit, or to 0 to make no extra requests. Default: 32 :param threshold: The minimum number of children comments a MoreComments object must have in order to be replaced. Default: 1 :returns: A list of MoreComments objects that were not replaced. Note that after making this call, the `comments` attribute of the submission will no longer contain any MoreComments objects. Items that weren't replaced are still removed from the tree, and will be included in the returned list.
def would_move_be_promotion(self, location=None):
    """Return True if a move from ``location`` would result in promotion.

    :type location: Location
    :rtype: bool
    """
    location = location or self.location
    if self.color == color.black:
        return location.rank == 1
    if self.color == color.white:
        return location.rank == 6
    return False
Finds if a move from the current location would result in promotion :type location: Location :rtype: bool
def auto_install(self):
    """Whether automatic installation of missing system packages is enabled.

    Returns True/False when configured (via ``$PIP_ACCEL_AUTO_INSTALL`` or
    the ``auto-install`` option), or None when unset so the user will be
    prompted at the appropriate time.
    """
    value = self.get(property_name='auto_install',
                     environment_variable='PIP_ACCEL_AUTO_INSTALL',
                     configuration_option='auto-install')
    return coerce_boolean(value) if value is not None else None
Whether automatic installation of missing system packages is enabled. :data:`True` if automatic installation of missing system packages is enabled, :data:`False` if it is disabled, :data:`None` otherwise (in this case the user will be prompted at the appropriate time). - Environment variable: ``$PIP_ACCEL_AUTO_INSTALL`` (refer to :func:`~humanfriendly.coerce_boolean()` for details on how the value of the environment variable is interpreted) - Configuration option: ``auto-install`` (also parsed using :func:`~humanfriendly.coerce_boolean()`) - Default: :data:`None`
def _le_annot_parms(self, annot, p1, p2):
    """Get common parameters for making line end symbols.

    Returns the matrix mapping the line onto the horizontal axis, its
    inverse, both transformed endpoints, the border width, PDF stroke/fill
    color operator strings, and an opacity operator string.
    """
    w = annot.border["width"]  # border width used to scale the symbol
    sc = annot.colors["stroke"]
    if not sc:
        sc = (0, 0, 0)  # default stroke color: black
    scol = " ".join(map(str, sc)) + " RG\n"  # PDF stroke-color operator
    fc = annot.colors["fill"]
    if not fc:
        fc = (0, 0, 0)  # default fill color: black
    fcol = " ".join(map(str, fc)) + " rg\n"  # PDF fill-color operator
    nr = annot.rect  # NOTE(review): unused here — verify before removing
    np1 = p1
    np2 = p2
    # Matrix mapping the segment (np1, np2) onto the horizontal axis.
    m = self._hor_matrix(np1, np2)
    im = ~m  # inverse, to map constructed symbol points back
    L = np1 * m
    R = np2 * m
    if 0 <= annot.opacity < 1:
        opacity = "/Alp0 gs\n"  # graphics-state operator applying the alpha
    else:
        opacity = ""
    return m, im, L, R, w, scol, fcol, opacity
Get common parameters for making line end symbols.
def write_json_corpus(documents, fnm):
    """Write documents as a JSON corpus: one JSON-encoded document per line.

    Parameters
    ----------
    documents: iterable of documents serializable with ``json.dumps``
    fnm: str
        The path to save the corpus.

    Returns the documents unchanged.
    """
    with codecs.open(fnm, 'wb', 'ascii') as corpus_file:
        corpus_file.writelines(json.dumps(doc) + '\n' for doc in documents)
    return documents
Write a list of Text instances as JSON corpus on disk. A JSON corpus contains one document per line, encoded in JSON. Parameters ---------- documents: iterable of estnltk.text.Text The documents of the corpus fnm: str The path to save the corpus.
def route_election(self, election):
    """Route an election to the right bootstrap routines.

    Bootstraps the election itself (general/general-runoff vs. special),
    then bootstraps the office as executive or legislative.
    """
    # BUG FIX: the original condition was
    #   slug == ElectionType.GENERAL or ElectionType.GENERAL_RUNOFF
    # which is always truthy (the second operand is a bare constant), so
    # every election took the general branch and specials never ran.
    if election.election_type.slug in (ElectionType.GENERAL,
                                       ElectionType.GENERAL_RUNOFF):
        self.bootstrap_general_election(election)
    elif election.race.special:
        self.bootstrap_special_election(election)
    if election.race.office.is_executive:
        self.bootstrap_executive_office(election)
    else:
        self.bootstrap_legislative_office(election)
Legislative or executive office?
def calc_radius(latitude, ellipsoid='WGS84'):
    """Approximate the Earth's radius (km) at a given latitude.

    Useful for localized datasets needing ellipsoid-level accuracy without
    full ellipsoid-model code; the result can replace a plain geocentric
    ``BODY_RADIUS``.

    Args:
        latitude (float): Latitude (degrees) to calculate the radius for.
        ellipsoid (str): Name of the ellipsoid model to use.

    Returns:
        float: Approximated Earth radius at the given latitude.
    """
    # (semi-major, semi-minor) axes in km, per model.
    ellipsoids = {
        'Airy (1830)': (6377.563, 6356.257),
        'Bessel': (6377.397, 6356.079),
        'Clarke (1880)': (6378.249145, 6356.51486955),
        'FAI sphere': (6371, 6371),
        'GRS-67': (6378.160, 6356.775),
        'International': (6378.388, 6356.912),
        'Krasovsky': (6378.245, 6356.863),
        'NAD27': (6378.206, 6356.584),
        'WGS66': (6378.145, 6356.758),
        'WGS72': (6378.135, 6356.751),
        'WGS84': (6378.137, 6356.752),
    }
    major, minor = ellipsoids[ellipsoid]
    ecc = 1 - minor ** 2 / major ** 2
    sin_lat = math.sin(math.radians(latitude))
    return major * (1 - ecc) / (1 - ecc * sin_lat ** 2) ** 1.5
Calculate earth radius for a given latitude. This function is most useful when dealing with datasets that are very localised and require the accuracy of an ellipsoid model without the complexity of code necessary to actually use one. The results are meant to be used as a :data:`BODY_RADIUS` replacement when the simple geocentric value is not good enough. The original use for ``calc_radius`` is to set a more accurate radius value for use with trigpointing databases that are keyed on the OSGB36 datum, but it has been expanded to cover other ellipsoids. Args: latitude (float): Latitude to calculate earth radius for ellipsoid (tuple of float): Ellipsoid model to use for calculation Returns: float: Approximated Earth radius at the given latitude
def get_declared_items(self):
    """Yield only the ``layout`` item from the superclass declarations."""
    for key, value in super(AndroidListView, self).get_declared_items():
        if key != 'layout':
            continue
        yield key, value
        break
Override to do it manually
def get_video_start_time(video_file):
    """Return the video's start time as a datetime, or None on failure.

    Computed as the file's end time minus its duration (both read via
    helper functions).
    """
    if not os.path.isfile(video_file):
        print("Error, video file {} does not exist".format(video_file))
        return None
    video_end_time = get_video_end_time(video_file)
    duration = get_video_duration(video_file)
    # Fix: None-checks use `is None` rather than `== None`.
    if video_end_time is None or duration is None:
        return None
    return video_end_time - datetime.timedelta(seconds=duration)
Get the video start time as a datetime (end time minus duration in seconds).
def is_serving(self) -> bool:
    """Tell whether the server is accepting new connections or shutting down."""
    server = self.server
    try:
        return server.is_serving()
    except AttributeError:
        # Fallback for servers without is_serving(): open sockets mean serving.
        return server.sockets is not None
Tell whether the server is accepting new connections or shutting down.
def includeme(config):
    """Activate the ``pyramid_retry`` execution policy in your application.

    Registers the ``last_retry_attempt`` and ``retryable_error`` view
    predicates, and installs a retry policy with ``attempts`` taken from
    the ``retry.attempts`` setting (default 3). Include it via
    ``config.include('pyramid_retry')``.
    """
    settings = config.get_settings()
    config.add_view_predicate('last_retry_attempt', LastAttemptPredicate)
    config.add_view_predicate('retryable_error', RetryableErrorPredicate)

    def register():
        # Executed later as a configurator action ordered at PHASE1_CONFIG.
        attempts = int(settings.get('retry.attempts') or 3)
        settings['retry.attempts'] = attempts
        hook = config.maybe_dotted(settings.get('retry.activate_hook'))
        policy = RetryableExecutionPolicy(attempts, activate_hook=hook)
        config.set_execution_policy(policy)

    config.action(None, register, order=PHASE1_CONFIG)
Activate the ``pyramid_retry`` execution policy in your application. This will add the :func:`pyramid_retry.RetryableErrorPolicy` with ``attempts`` pulled from the ``retry.attempts`` setting. The ``last_retry_attempt`` and ``retryable_error`` view predicates are registered. This should be included in your Pyramid application via ``config.include('pyramid_retry')``.
def main():
    """Run a benchmark adding N items (from the parsed arguments) to a
    RevisionStore, optionally pausing at the end."""
    arguments, parser = parse_arguments()
    store = RevisionStore()
    sys.stdout.write("Iterating over %d items.\n" % arguments.number)
    # Fix: use `range` — `xrange` is Python 2 only and this codebase uses
    # Python 3 features elsewhere (annotations, print()).
    for i in range(arguments.number):
        key = chr(65 + (i % 26))
        value = [key * (i % 26), key * (i % 13), key * (i % 5)]
        store.add(key, value)
    if arguments.pause:
        sys.stdout.write("Done!")
        sys.stdin.readline()
Run a benchmark for N items. If N is not specified, take 1,000,000 for N.
def get_authorization_url(self, redirect_uri, client_id, options=None, scope=None):
    """Return the URL to send the user to in order to get authorization.

    :param str redirect_uri: The URI to redirect to after authorization.
    :param str client_id: The client ID issued by WePay to your app.
    :keyword dict options: Extra values for the authorize call (state,
        user_name, user_email, ...).
    :keyword str scope: Comma-separated permission string; a broad default
        is used when omitted.
    """
    options = options or {}
    if not scope:
        scope = "manage_accounts,collect_payments," \
                "view_user,preapprove_payments," \
                "manage_subscriptions,send_money"
    options.update(scope=scope, redirect_uri=redirect_uri, client_id=client_id)
    # NOTE(review): urllib.urlencode is Python-2 only; on Python 3 this
    # would be urllib.parse.urlencode — verify the target interpreter.
    return self.browser_endpoint + '/oauth2/authorize?' + \
        urllib.urlencode(options)
Returns a URL to send the user to in order to get authorization. After getting authorization the user will return to redirect_uri. Optionally, scope can be set to limit permissions, and the options dict can be loaded with any combination of state, user_name or user_email. :param str redirect_uri: The URI to redirect to after a authorization. :param str client_id: The client ID issued by WePay to your app. :keyword dict options: Allows for passing additional values to the authorize call, aside from scope, redirect_uri, and etc. :keyword str scope: A comma-separated string of permissions.
def get_data_context(context_type, options, *args, **kwargs):
    """Return a data_context object exposing dataset listing and access.

    :param context_type: (string) one of "SqlAlchemy" or "PandasCSV"
    :param options: options passed to the data context's connect method.
    :return: a new DataContext object
    """
    if context_type == "SqlAlchemy":
        return SqlAlchemyDataContext(options, *args, **kwargs)
    if context_type == "PandasCSV":
        return PandasCSVDataContext(options, *args, **kwargs)
    raise ValueError("Unknown data context.")
Return a data_context object which exposes options to list datasets and get a dataset from that context. This is a new API in Great Expectations 0.4, and is subject to rapid change. :param context_type: (string) one of "SqlAlchemy" or "PandasCSV" :param options: options to be passed to the data context's connect method. :return: a new DataContext object
def payments(self):
    """Return a list of payments for this subscription."""
    return self.client.subscription_payments.on(self).list()
Return a list of payments for this subscription.
def create_client_for_file(self, filename, is_cython=False):
    """Create a console client to execute code related to ``filename``."""
    self.create_new_client(filename=filename, is_cython=is_cython)
    # Presumably file-bound clients should not count toward master consoles
    # — TODO confirm against create_new_client's bookkeeping.
    self.master_clients -= 1
    client = self.get_current_client()
    client.allow_rename = False
    self.rename_client_tab(client, self.disambiguate_fname(filename))
Create a client to execute code related to a file.
def consume(self, char): if self.state == "stream": self._stream(char) elif self.state == "escape": self._escape_sequence(char) elif self.state == "escape-lb": self._escape_parameters(char) elif self.state == "mode": self._mode(char) elif self.state == "charset-g0": self._charset_g0(char) elif self.state == "charset-g1": self._charset_g1(char)
Consume a single character and advance the state as necessary.
def _apply_properties(widget, properties={}): with widget.hold_sync(): for key, value in properties.items(): setattr(widget, key, value)
Applies the specified properties to the widget. `properties` is a dictionary with key value pairs corresponding to the properties to be applied to the widget.
def asarray(x, dtype=None):
    """Convert ``x`` into a ``numpy.ndarray`` of the requested dtype."""
    iterable = scalarasiter(x)
    if isinstance(iterable, ndarray):
        return iterable
    if not hasattr(iterable, '__len__'):
        iterable = list(iterable)
    if dtype != object_type:
        return array(iterable, dtype=dtype)
    # Object dtype: fill element by element — presumably to stop numpy
    # from coercing nested sequences; verify against callers.
    out = ndarray((len(iterable),), dtype=dtype)
    for idx, item in enumerate(iterable):
        out[idx] = item
    return out
Convert ``x`` into a ``numpy.ndarray``.
def _css_rules_to_string(self, rules):
    """Given a list of css rules, return a single css string.

    Items are either ``(selector, declarations)`` tuples or cssutils
    stylesheet objects whose properties are rewritten as ``!important``
    before serialization.
    """
    lines = []
    for item in rules:
        if isinstance(item, tuple):
            k, v = item
            lines.append("%s {%s}" % (k, make_important(v)))
        else:
            for rule in item.cssRules:
                # Comments and unknown rules carry no style; skip them.
                if isinstance(
                    rule,
                    (
                        cssutils.css.csscomment.CSSComment,
                        cssutils.css.cssunknownrule.CSSUnknownRule,
                    ),
                ):
                    continue
                for key in rule.style.keys():
                    # Re-set each property with priority "!important".
                    rule.style[key] = (
                        rule.style.getPropertyValue(key, False),
                        "!important",
                    )
            lines.append(item.cssText)
    return "\n".join(lines)
given a list of css rules returns a css string
def check_array_or_list(input):
    """Return a 1-D ndarray from ``input`` (ndarray or list) after checking
    that it is one-dimensional with non-negative elements.

    :raises TypeError: if input is neither ndarray nor list.
    :raises ValueError: if input is not 1-D or contains negative values.
    """
    # Fix: use isinstance() instead of `type(x) != np.ndarray` comparisons.
    if isinstance(input, np.ndarray):
        output = input
    elif isinstance(input, list):
        output = np.array(input)
    else:
        raise TypeError('Expecting input type as ndarray or list.')
    if output.ndim != 1:
        raise ValueError('Input array must have 1 dimension.')
    # np.any is the idiomatic (and equivalent) form of `sum(mask) > 0`.
    if np.any(output < 0.):
        raise ValueError("Input array values cannot be negative.")
    return output
Return 1D ndarray, if input can be converted and elements are non-negative.
def http(self, *args, **kwargs):
    """Start building a new HTTP route linked to this API instance."""
    # Delegates to the module-level `http` router with this API injected.
    return http(*args, **dict(kwargs, api=self.api))
Starts the process of building a new HTTP route linked to this API instance
def plotGenCost(generators):
    """Plot the cost curves of the given generators."""
    figure()
    plots = []
    for generator in generators:
        if generator.pcost_model == PW_LINEAR:
            # Piecewise-linear: the points themselves are the curve.
            x = [px for px, _ in generator.p_cost]
            y = [py for _, py in generator.p_cost]
        elif generator.pcost_model == POLYNOMIAL:
            x = scipy.arange(generator.p_min, generator.p_max, 5)
            y = scipy.polyval(scipy.array(generator.p_cost), x)
        else:
            # Fix: a bare `raise` with no active exception raises a generic
            # RuntimeError; raise a descriptive error instead.
            raise ValueError("unsupported cost model: %s" % generator.pcost_model)
        plots.append(plot(x, y))
    xlabel("P (MW)")
    ylabel("Cost ($)")
    legend(plots, [g.name for g in generators])
    show()
Plots the costs of the given generators.
def decode(self, inputs, context, inference=False):
    """Apply the decoder to ``inputs`` given the encoder ``context``.

    :param inputs: tensor with inputs
    :param context: context from the encoder
    :param inference: True for inference mode, False for training mode
    """
    decoder = self.decoder
    return decoder(inputs, context, inference)
Applies the decoder to inputs, given the context from the encoder. :param inputs: tensor with inputs (batch, seq_len) if 'batch_first' else (seq_len, batch) :param context: context from the encoder :param inference: if True inference mode, if False training mode
def on_for_seconds(self, steering, speed, seconds, brake=True, block=True):
    """Rotate the motors according to ``steering`` for ``seconds``."""
    left, right = self.get_speed_steering(steering, speed)
    MoveTank.on_for_seconds(
        self, SpeedNativeUnits(left), SpeedNativeUnits(right),
        seconds, brake, block)
Rotate the motors according to the provided ``steering`` for ``seconds``.
def first_order_score(y, mean, scale, shape, skewness):
    """GAS Skew-t update term using the gradient only.

    Parameters: datapoint ``y``, location ``mean``, ``scale``, tail
    thickness ``shape``, and ``skewness`` of the Skew t distribution.
    Returns the score of the Skew t family.
    """
    m1 = (np.sqrt(shape) * sp.gamma((shape - 1.0) / 2.0)) / \
         (np.sqrt(np.pi) * sp.gamma(shape / 2.0))
    adjusted_mean = mean + (skewness - (1.0 / skewness)) * scale * m1
    residual = y - adjusted_mean
    coeff = (shape + 1) / shape
    if residual >= 0:
        denom = np.power(skewness * scale, 2) + np.power(residual, 2) / shape
    else:
        denom = np.power(scale, 2) + np.power(skewness * residual, 2) / shape
    return coeff * residual / denom
GAS Skew t Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Skew t distribution scale : float scale parameter for the Skew t distribution shape : float tail thickness parameter for the Skew t distribution skewness : float skewness parameter for the Skew t distribution Returns ---------- - Score of the Skew t family
def generate_content_encoding(self):
    """Emit validation code for ``contentEncoding``: base64 values get
    decoded in the generated validator.

    .. code-block:: python

        {
            'contentEncoding': 'base64',
        }
    """
    if self._definition['contentEncoding'] == 'base64':
        # Emitted code decodes str values; a failed decode is a schema error.
        with self.l('if isinstance({variable}, str):'):
            with self.l('try:'):
                self.l('import base64')
                self.l('{variable} = base64.b64decode({variable})')
            with self.l('except Exception:'):
                self.l('raise JsonSchemaException("{name} must be encoded by base64")')
        # Emitted check: an empty decoded value is rejected as well.
        with self.l('if {variable} == "":'):
            self.l('raise JsonSchemaException("contentEncoding must be base64")')
Means decoding value when it's encoded by base64. .. code-block:: python { 'contentEncoding': 'base64', }
def delegate(attribute_name, method_names):
    """Class decorator forwarding every method in ``method_names`` to the
    attribute called ``attribute_name``.

    Fix: build forwarders as real closures instead of ``eval``-compiled
    lambdas — safer, debuggable, and immune to late-binding surprises.
    """
    def _make_forwarder(attribute, method_name):
        # One closure per method name, bound at creation time.
        def forwarder(self, *args, **kwargs):
            return getattr(getattr(self, attribute), method_name)(*args, **kwargs)
        return forwarder

    def decorator(cls):
        attribute = attribute_name
        if attribute.startswith("__"):
            # Apply private name mangling by hand, as a class body would.
            attribute = "_" + cls.__name__ + attribute
        for name in method_names:
            setattr(cls, name, _make_forwarder(attribute, name))
        return cls

    return decorator
Pass the call to the attribute called attribute_name for every method listed in method_names.
def get_distribution(dist):
    """Return a current Distribution object for a Requirement or string."""
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if isinstance(dist, Distribution):
        return dist
    raise TypeError("Expected string, Requirement, or Distribution", dist)
Return a current distribution object for a Requirement or string
def aggregate_series(self, *args, **kwargs) -> InfoArray:
    """Aggregate time-series data per the sequence's aggregation mode.

    ``'none'`` returns the raw series, ``'mean'`` delegates to
    :meth:`average_series`; any other mode raises RuntimeError.
    """
    mode = self.aggregation_ext
    if mode == 'none':
        return self.series
    if mode == 'mean':
        return self.average_series(*args, **kwargs)
    raise RuntimeError(
        'Unknown aggregation mode `%s` for sequence %s.'
        % (mode, objecttools.devicephrase(self)))
Aggregates time series data based on the actual |FluxSequence.aggregation_ext| attribute of |IOSequence| subclasses. We prepare some nodes and elements with the help of method |prepare_io_example_1| and select a 1-dimensional flux sequence of type |lland_fluxes.NKor| as an example: >>> from hydpy.core.examples import prepare_io_example_1 >>> nodes, elements = prepare_io_example_1() >>> seq = elements.element3.model.sequences.fluxes.nkor If no |FluxSequence.aggregation_ext| is `none`, the original time series values are returned: >>> seq.aggregation_ext 'none' >>> seq.aggregate_series() InfoArray([[ 24., 25., 26.], [ 27., 28., 29.], [ 30., 31., 32.], [ 33., 34., 35.]]) If no |FluxSequence.aggregation_ext| is `mean`, function |IOSequence.aggregate_series| is called: >>> seq.aggregation_ext = 'mean' >>> seq.aggregate_series() InfoArray([ 25., 28., 31., 34.]) In case the state of the sequence is invalid: >>> seq.aggregation_ext = 'nonexistent' >>> seq.aggregate_series() Traceback (most recent call last): ... RuntimeError: Unknown aggregation mode `nonexistent` for \ sequence `nkor` of element `element3`. The following technical test confirms that all potential positional and keyword arguments are passed properly: >>> seq.aggregation_ext = 'mean' >>> from unittest import mock >>> seq.average_series = mock.MagicMock() >>> _ = seq.aggregate_series(1, x=2) >>> seq.average_series.assert_called_with(1, x=2)
def transformer_tpu_1b():
    """Hparams for machine translation with ~1.1B parameters."""
    hparams = transformer_tpu()
    hparams.batch_size = 1024
    hparams.num_hidden_layers = 8
    hparams.hidden_size = 2048
    hparams.filter_size = 8192
    # bfloat16 for activations and weights — presumably to fit the model in
    # TPU memory at this size; verify before changing.
    hparams.activation_dtype = "bfloat16"
    hparams.weight_dtype = "bfloat16"
    hparams.shared_embedding_and_softmax_weights = False
    return hparams
Hparams for machine translation with ~1.1B parameters.
def _handle_eor(self, route_family):
    """Handle end-of-RIB; only the RTC address family triggers work here.

    Learns the collected initial RTC NLRI paths and stops scheduling
    further init updates.
    """
    LOG.debug('Handling EOR for %s', route_family)
    if route_family != RF_RTC_UC:
        return
    self._unschedule_sending_init_updates()
    table_manager = self._core_service.table_manager
    for rt_nlri in self._init_rtc_nlri_path:
        table_manager.learn_path(rt_nlri)
    self.pause(0)
    self._init_rtc_nlri_path = None
Currently we only handle EOR for RTC address-family. We send non-rtc initial updates if not already sent.
def send_events(self, events):
    """Add multiple events to the queued message.

    :returns: None — nothing has been sent to the Riemann server yet.
    """
    queued = self.queue.events
    for event in events:
        queued.add().MergeFrom(event)
    return None
Adds multiple events to the queued message :returns: None - nothing has been sent to the Riemann server yet
def tx_output(network: str, value: Decimal, n: int, script: ScriptSig) -> TxOut:
    """Create a TxOut of ``value`` coins at output index ``n``."""
    params = net_query(network)
    base_units = int(value * params.to_unit)
    return TxOut(network=params, value=base_units, n=n, script_pubkey=script)
create TxOut object
def get_message(self):
    """Return the underlying C message, refreshing its attributes first;
    None when no message is set."""
    message = self._message
    if not message:
        return None
    self._populate_message_attributes(message)
    return message
Get the underlying C message from this object. :rtype: uamqp.c_uamqp.cMessage
def _put_many(self, items: Iterable[DtoObject], cls):
    """Put many items into the database, refreshing each row's
    lastUpdate column.

    Skips entirely when caching for ``cls`` is disabled (expiration 0).
    """
    # An expiration of 0 means "never store this type".
    if self._expirations.get(cls._dto_type) == 0:
        return
    session = self._session
    for raw in items:
        row = cls(**raw)
        row.updated()
        session.merge(row)
Puts many items into the database. Updates lastUpdate column for each of them
def _get_implied_apps(self, detected_apps): def __get_implied_apps(apps): _implied_apps = set() for app in apps: try: _implied_apps.update(set(self.apps[app]['implies'])) except KeyError: pass return _implied_apps implied_apps = __get_implied_apps(detected_apps) all_implied_apps = set() while not all_implied_apps.issuperset(implied_apps): all_implied_apps.update(implied_apps) implied_apps = __get_implied_apps(all_implied_apps) return all_implied_apps
Get the set of apps implied by `detected_apps`.
def _locate(self, idx):
    """Locate element ``idx`` in the internal packed-bit representation.

    Each element occupies ``self._width`` bits. Returns a tuple of
    (starting byte index, bit offset within the starting byte, one past
    the final byte index).
    """
    start = idx * self._width
    end = (idx + 1) * self._width
    # Split the absolute starting bit position into byte + bit-in-byte.
    sbyte, sbit = divmod(start, 8)
    # Ceiling division so a partially used trailing byte is included.
    ebyte = BinInt(end).ceildiv(8)
    return sbyte, sbit, ebyte
Locates an element in the internal data representation. Returns starting byte index, starting bit index in the starting byte, and one past the final byte index.
def shorten(string, max_length=80, trailing_chars=3):
    """Trim ``string`` down to at most ``max_length`` characters, keeping
    the first part, an ellipsis, and the last ``trailing_chars`` characters.

    Strings already within ``max_length`` are returned unchanged.
    """
    assert type(string).__name__ in {'str', 'unicode'}, 'shorten needs string to be a string, not {}'.format(type(string))
    assert type(max_length) == int, 'shorten needs max_length to be an int, not {}'.format(type(max_length))
    assert type(trailing_chars) == int, 'shorten needs trailing_chars to be an int, not {}'.format(type(trailing_chars))
    assert max_length > 0, 'shorten needs max_length to be positive, not {}'.format(max_length)
    assert trailing_chars >= 0, 'shorten needs trailing_chars to be greater than or equal to 0, not {}'.format(trailing_chars)
    if len(string) <= max_length:
        return string
    # Reserve 3 characters for the ellipsis plus the requested tail.
    head = string[:max_length - (trailing_chars + 3)]
    tail = string[-trailing_chars:] if trailing_chars > 0 else ''
    return '{before:}...{after:}'.format(before=head, after=tail)
trims the 'string' argument down to 'max_length' to make previews to long string values
def set_default_by_index(self, index):
    """Set the default dataset by its index.

    Subsequent calls without an explicit dataset index or alias will be
    redirected to this dataset.

    Args:
        index (int): The index of the dataset to make the default.

    Raises:
        DataInvalidIndex: If the index does not represent a valid dataset.
    """
    if len(self._datasets) > index:
        self._default_index = index
        return
    raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
Set the default dataset by its index. After changing the default dataset, all calls without explicitly specifying the dataset by index or alias will be redirected to this dataset. Args: index (int): The index of the dataset that should be made the default. Raises: DataInvalidIndex: If the index does not represent a valid dataset.
def update_from_stripe_data(self, stripe_coupon, exclude_fields=None, commit=True):
    """Update this StripeCoupon from data in ``stripe_coupon`` (a
    stripe.Coupon-like mapping) without calling stripe.Coupon.retrieve.

    :param stripe_coupon: mapping holding the Stripe coupon fields
    :param exclude_fields: iterable of field names NOT to copy over
    :param commit: when True, persist via a queryset ``update`` and
        return the number of rows altered; when False, only mutate the
        in-memory instance and return None
    """
    # Copy every tracked Stripe field except those explicitly excluded.
    fields_to_update = self.STRIPE_FIELDS - set(exclude_fields or [])
    update_data = {key: stripe_coupon[key] for key in fields_to_update}
    # Stripe sends unix timestamps; convert to timezone-aware datetimes.
    for field in ["created", "redeem_by"]:
        if update_data.get(field):
            update_data[field] = timestamp_to_timezone_aware_date(update_data[field])
    # Stripe amounts are integer cents; store as a Decimal major unit.
    if update_data.get("amount_off"):
        update_data["amount_off"] = Decimal(update_data["amount_off"]) / 100
    # Mirror the changes on this instance as well.
    for key, value in six.iteritems(update_data):
        setattr(self, key, value)
    if commit:
        return StripeCoupon.objects.filter(pk=self.pk).update(**update_data)
Update StripeCoupon object with data from stripe.Coupon without calling stripe.Coupon.retrieve. To only update the object, set the commit param to False. Returns the number of rows altered or None if commit is False.
def serialize(self, private=False):
    """Build the JWK dictionary representation of this key.

    :param private: include the private numeric members as well
    :return: A JWK as a dictionary
    :raises SerializationNotPossible: if neither a private nor a public
        key is present
    """
    if not self.priv_key and not self.pub_key:
        raise SerializationNotPossible()
    res = self.common()
    # Always expose the public numeric members that are set.
    public_longs = list(set(self.public_members) & set(self.longs))
    for param in public_longs:
        item = getattr(self, param)
        if item:
            res[param] = item
    if private:
        # Add every long member that has a value (private ones included).
        # NOTE: a dead guard (`if not private and param in [...]`) was
        # removed here - inside `if private:` it could never be true.
        for param in self.longs:
            item = getattr(self, param)
            if item:
                res[param] = item
    if self.x5c:
        res['x5c'] = [x.decode('utf-8') for x in self.x5c]
    return res
Given a cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey or RSAPublicKey instance construct the JWK representation. :param private: Should I do the private part or not :return: A JWK as a dictionary
def get_leaf_node_path_list(self, sep=os.path.sep, type_str=None):
    """Get paths for all leaf nodes of the tree rooted at this node.

    Args:
        sep: str
            Separator inserted between path elements. Defaults to the
            platform path separator.
        type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None
            If set, only include information from nodes of that type.

    Returns:
        list of str: paths to the leaf nodes.
    """
    paths = []
    for leaf in self.leaf_node_gen:
        paths.append(leaf.get_path_str(sep, type_str))
    return paths
Get paths for all leaf nodes for the tree rooted at this node. Args: sep: str One or more characters to insert between each element in the path. Defaults to "/" on Unix and "\" on Windows. type_str: SUBJECT_NODE_TAG, TYPE_NODE_TAG or None. If set, only include information from nodes of that type. Returns: list of str: The paths to the leaf nodes for the tree rooted at this node.
def get_logged_in_account(token_manager=None, app_url=defaults.APP_URL):
    """Get the account id for the logged-in auth ``token_manager``.

    NOTE(review): as written this function calls *itself* with the same
    arguments and subscripts the result, which recurses without a base
    case and will raise RecursionError. It was presumably meant to call
    a same-named helper from another module that this definition
    shadows - confirm the intended target before relying on this.
    """
    return get_logged_in_account(token_manager=token_manager, app_url=app_url)['id']
get the account details for logged in account of the auth token_manager
def Command(self, Command, Reply=u'', Block=False, Timeout=30000, Id=-1):
    """Create an API command object (factory for ``api.Command``).

    :Parameters:
      Command : unicode
        Command string.
      Reply : unicode
        Expected reply; by default any reply is accepted.
      Block : bool
        If True, ``SendCommand`` waits for a response before returning.
      Timeout : float, int or long
        Timeout in milliseconds (int/long) or seconds (float); used when
        ``Block`` is True.
      Id : int
        Command Id; -1 means it is assigned automatically on send.

    :return: A command object.
    :rtype: `Command`
    """
    # Imported locally and aliased because the parameter `Command`
    # shadows the class name inside this method (and presumably to
    # avoid a circular import with `api` - confirm).
    from api import Command as CommandClass
    return CommandClass(Command, Reply, Block, Timeout, Id)
Creates an API command object. :Parameters: Command : unicode Command string. Reply : unicode Expected reply. By default any reply is accepted (except errors which raise an `SkypeError` exception). Block : bool If set to True, `SendCommand` method waits for a response from Skype API before returning. Timeout : float, int or long Timeout. Used if Block == True. Timeout may be expressed in milliseconds if the type is int or long or in seconds (or fractions thereof) if the type is float. Id : int Command Id. The default (-1) means it will be assigned automatically as soon as the command is sent. :return: A command object. :rtype: `Command` :see: `SendCommand`
def pf_to_n(L, pf, R):
    """Return the number of non-intersecting spheres of radius ``R``
    needed to come as close as possible to packing fraction ``pf`` in a
    box with side lengths ``L``, along with the fraction actually
    achieved by that count.

    Parameters
    ----------
    L: float array, shape (d,)
        System lengths.
    pf: float
        Fraction of space to be occupied by the spheres.
    R: float
        Sphere radius.

    Returns
    -------
    n: int
        Number of spheres achieving packing fraction ``pf_actual``.
    pf_actual: float
        Fraction of space occupied by ``n`` spheres.
    """
    dim = L.shape[0]
    # np.product was a deprecated alias removed in NumPy 2.0; np.prod is
    # the supported spelling with identical behavior.
    n = int(round(pf * np.prod(L) / sphere_volume(R, dim)))
    pf_actual = n_to_pf(L, n, R)
    return n, pf_actual
Returns the number of non-intersecting spheres required to achieve as close to a given packing fraction as possible, along with the actual achieved packing fraction. Parameters ---------- L: float array, shape (d,) System lengths. pf: float Fraction of space to be occupied by the spheres. R: float Sphere radius. Returns ------- n: integer Number of spheres required to achieve a packing fraction `pf_actual` pf_actual: Fraction of space occupied by `n` spheres. This is the closest possible fraction achievable to `pf`.
def move_to(self, start, end):
    """Move the `Polynucleotide` to lie on the `start` -> `end` vector.

    Parameters
    ----------
    start : 3D Vector (tuple or list or numpy.array)
        Coordinate of the start of the helix primitive.
    end : 3D Vector (tuple or list or numpy.array)
        Coordinate of the end of the helix primitive.

    Raises
    ------
    ValueError
        If `start` and `end` are (nearly) identical.
    """
    start = numpy.array(start)
    end = numpy.array(end)
    # A zero-length target axis would make the orientation undefined.
    if numpy.allclose(start, end):
        raise ValueError('start and end must NOT be identical')
    translation, angle, axis, point = find_transformations(
        self.helix_start, self.helix_end, start, end)
    # Rotate first (about the computed axis/point), then translate;
    # skip the rotation when the helix is already aligned.
    if not numpy.isclose(angle, 0.0):
        self.rotate(angle=angle, axis=axis, point=point, radians=False)
    self.translate(vector=translation)
    return
Moves the `Polynucleotide` to lie on the `start` and `end` vector. Parameters ---------- start : 3D Vector (tuple or list or numpy.array) The coordinate of the start of the helix primitive. end : 3D Vector (tuple or list or numpy.array) The coordinate of the end of the helix primitive. Raises ------ ValueError Raised if `start` and `end` are very close together.
def try_to_restart_deads(self):
    """Try to re-initialize and restart dead module instances.

    Instances that fail to re-initialize are put back on the
    ``to_restart`` list for a later attempt.

    :return: None
    """
    # Take a snapshot and clear the shared list so failed instances can
    # be re-queued below without being retried in this same pass.
    to_restart = self.to_restart[:]
    del self.to_restart[:]
    for instance in to_restart:
        logger.warning("Trying to restart module: %s", instance.name)
        if self.try_instance_init(instance):
            logger.warning("Restarting %s...", instance.name)
            # Drop the old (dead) process handle before starting anew.
            instance.process = None
            instance.start()
        else:
            # Init failed - keep it queued for the next restart attempt.
            self.to_restart.append(instance)
Try to reinit and restart dead instances :return: None
def _eval_call(self, node): try: func = self.functions[node.func.id] except KeyError: raise NameError(node.func.id) value = func( *(self._eval(a) for a in node.args), **dict(self._eval(k) for k in node.keywords) ) if value is True: return 1 elif value is False: return 0 else: return value
Evaluate a function call :param node: Node to eval :return: Result of node
def reset_ilo(self):
    """Reset the iLO.

    :raises: IloError, on an error from iLO.
    :raises: IloConnectionError, if iLO is not up after reset.
    :raises: IloCommandNotSupportedError, if the command is not
        supported on the server.
    """
    _, reset_uri = self._get_ilo_details()
    status, headers, response = self._rest_post(
        reset_uri, None, {'Action': 'Reset'})
    if status != 200:
        raise exception.IloError(self._get_extended_error(response))
    # Block until the iLO comes back up after the reset.
    common.wait_for_ilo_after_reset(self)
Resets the iLO. :raises: IloError, on an error from iLO. :raises: IloConnectionError, if iLO is not up after reset. :raises: IloCommandNotSupportedError, if the command is not supported on the server.
def translate_update(blob):
    """Convert JSON parse output into self-aware objects."""
    result = {}
    for key, value in blob.items():
        result[translate_key(key)] = parse_serialdiff(value)
    return result
converts JSON parse output to self-aware objects
def add_file_metadata(self, fname):
    """Collect filesystem metadata for ``fname`` and append it to
    ``self.fl_metadata``.

    Best effort: on IOError (e.g. a network share dropping out) the
    partially-filled record is still appended after printing an error.
    """
    record = {"fullfilename": fname}
    try:
        record["name"] = os.path.basename(fname)
        record["date"] = self.GetDateAsString(fname)
        record["size"] = os.path.getsize(fname)
        record["path"] = os.path.dirname(fname)
    except IOError:
        print('Error getting metadata for file')
    self.fl_metadata.append(record)
collects the files metadata - note that this will fail with strange errors if network connection drops out to shared folder, but it is better to stop the program rather than do a try except otherwise you will get an incomplete set of files.
def pull(self):
    """Return a generator containing the results of the next query in
    the pipeline.

    Raises PullOrderException when a previous pull still holds the lock.
    """
    if not self._pull_lock.acquire(blocking=False):
        raise PullOrderException()
    return self._results_generator()
Returns a generator containing the results of the next query in the pipeline
def _qteMouseClicked(self, widgetObj):
    """Update the Qtmacs internal focus state as the result of a mouse
    click on ``widgetObj``.

    |Args|

    * ``widgetObj`` (**QWidget**): the widget that was clicked.

    |Returns|

    * **None**
    """
    app = qteGetAppletFromWidget(widgetObj)
    if app is None:
        # Click landed outside any known applet; nothing to update.
        return
    else:
        self._qteActiveApplet = app
    if not hasattr(widgetObj, '_qteAdmin'):
        # Widget not registered with Qtmacs: activate it directly.
        self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
    else:
        if app._qteAdmin.isQtmacsApplet:
            # Click on the applet itself (not a registered sub-widget):
            # activate the applet without a specific widget.
            self._qteActiveApplet.qteMakeWidgetActive(None)
        else:
            self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
    # Let the focus manager reconcile the new state.
    self._qteFocusManager()
Update the Qtmacs internal focus state as the result of a mouse click. |Args| * ``new`` (**QWidget**): the widget that received the focus. |Returns| * **None** |Raises| * **None**
def run_cmd(command, verbose=True, shell='/bin/bash'):
    """Run ``command`` through the given shell and return its output
    (stdout and stderr combined) as a list of lines.

    When ``verbose`` is False, blank lines are filtered out.
    """
    proc = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT,
                 executable=shell)
    lines = proc.stdout.read().decode().strip().split('\n')
    if not verbose:
        lines = [line for line in lines if line.strip()]
    return lines
internal helper function to run shell commands and get output
def set_root(self, root):
    """Set the root model on all subplots; a no-op when ``root`` is None."""
    if root is None:
        return
    for subplot in self.traverse(lambda x: x):
        subplot._root = root
Sets the root model on all subplots.
def terms(self, facet_name, field, size=10, order=None, all_terms=False,
          exclude=None, regex='', regex_flags=''):
    """Add a terms facet returning the ``size`` most frequent terms of
    ``field``.

    order: one of count, term, reverse_count, reverse_term (default count).
    all_terms: also return terms that match no hit (count 0).
    exclude: terms to exclude from the facet result.
    regex / regex_flags: regex controlling which terms are included.

    Returns self so calls can be chained.
    """
    facet = dict(field=field, size=size)
    # BUG FIX: the optional settings were previously stored via
    # self[facet_name][terms] - the bare name `terms` (this method
    # object) instead of the string key 'terms'.
    self[facet_name] = dict(terms=facet)
    if order:
        facet['order'] = order
    if all_terms:
        facet['all_terms'] = True
    if exclude:
        facet['exclude'] = exclude
    if regex:
        facet['regex'] = regex
    if regex_flags:
        facet['regex_flags'] = regex_flags
    return self
Allow to specify field facets that return the N most frequent terms. Ordering: Allow to control the ordering of the terms facets, to be ordered by count, term, reverse_count or reverse_term. The default is count. All Terms: Allow to get all the terms in the terms facet, ones that do not match a hit, will have a count of 0. Note, this should not be used with fields that have many terms. Excluding Terms: It is possible to specify a set of terms that should be excluded from the terms facet request result. Regex Patterns: The terms API allows to define regex expression that will control which terms will be included in the faceted list.
def delete_events(self, event_collection, params):
    """Delete events via the Keen IO API. A master key must be set first.

    :param event_collection: string, the event collection from which
        events are being deleted
    :param params: filter parameters restricting which events to delete
    :return: True on success (errors raise via ``_error_handling``)
    """
    url = "{0}/{1}/projects/{2}/events/{3}".format(self.base_url, self.api_version, self.project_id, event_collection)
    headers = utilities.headers(self.master_key)
    response = self.fulfill(HTTPMethods.DELETE, url, params=params, headers=headers, timeout=self.post_timeout)
    # Raises on non-success responses.
    self._error_handling(response)
    return True
Deletes events via the Keen IO API. A master key must be set first. :param event_collection: string, the event collection from which event are being deleted
def nlmsg_find_attr(nlh, hdrlen, attrtype):
    """Find a specific attribute in a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).
    attrtype -- type of attribute to look for (integer).

    Returns:
    The first attribute matching the specified type (nlattr instance).
    """
    data = nlmsg_attrdata(nlh, hdrlen)
    length = nlmsg_attrlen(nlh, hdrlen)
    return nla_find(data, length, attrtype)
Find a specific attribute in a Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231 Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). attrtype -- type of attribute to look for (integer). Returns: The first attribute which matches the specified type (nlattr class instance).
def _get_field_doc(self, field): fieldspec = dict() fieldspec['type'] = field.__class__.__name__ fieldspec['required'] = field.required fieldspec['validators'] = [{validator.__class__.__name__: validator.__dict__} for validator in field.validators] return fieldspec
Return documentation for a field in the representation.
def routeByMonthAbbr(self, request, year, monthAbbr):
    """Route a request with a month abbreviation to the monthly view.

    NOTE(review): DatePictures['Mon'] appears to be a string of month
    abbreviations where each entry occupies four characters, so the
    match offset integer-divided by 4 yields the zero-based month -
    confirm against the definition of DatePictures.
    """
    month = (DatePictures['Mon'].index(monthAbbr.lower()) // 4) + 1
    return self.serveMonth(request, year, month)
Route a request with a month abbreviation to the monthly view.
def normalize_mesh(mesh):
    """Return a shallow copy of ``mesh`` whose positions are centered
    and scaled to fit into the -1..1 cube.

    Only the first three position components are kept; the input
    mesh's array is not modified.
    """
    out = dict(mesh)
    pos = out['position'][:, :3].copy()
    # Center on the midpoint of the bounding box, then scale by the
    # largest absolute coordinate.
    center = (pos.max(0) + pos.min(0)) / 2.0
    pos -= center
    pos /= np.abs(pos).max()
    out['position'] = pos
    return out
Scale mesh to fit into -1..1 cube
def retrieve_matching_jwk(self, token):
    """Get the signing key for ``token`` from the OP's JWKS endpoint.

    Matches on the token header's ``kid`` and, when present in the JWK,
    also verifies the ``alg`` value.

    :raises SuspiciousOperation: on an alg mismatch or when no key with
        a matching kid is found.
    """
    response_jwks = requests.get(
        self.OIDC_OP_JWKS_ENDPOINT,
        verify=self.get_settings('OIDC_VERIFY_SSL', True)
    )
    response_jwks.raise_for_status()
    jwks = response_jwks.json()
    # Parse the (unverified) JWS header to learn which key signed it.
    jws = JWS.from_compact(token)
    json_header = jws.signature.protected
    header = Header.json_loads(json_header)
    key = None
    for jwk in jwks['keys']:
        if jwk['kid'] != smart_text(header.kid):
            continue
        # alg is optional in a JWK; when present it must agree.
        if 'alg' in jwk and jwk['alg'] != smart_text(header.alg):
            raise SuspiciousOperation('alg values do not match.')
        key = jwk
    if key is None:
        raise SuspiciousOperation('Could not find a valid JWKS.')
    return key
Get the signing key by exploring the JWKS endpoint of the OP.
def save_response_content(response, filename='data.csv', destination=os.path.curdir, chunksize=32768):
    """Stream a requests response body to disk one chunk at a time.

    :param response: streaming ``requests`` response
    :param filename: target file name, or a full path (detected by the
        presence of a path separator, in which case ``destination`` is
        ignored)
    :param destination: directory for ``filename``
    :param chunksize: bytes per chunk; falsy values fall back to 32768
    :return: the full path the content was written to
    """
    chunksize = chunksize or 32768
    if os.path.sep in filename:
        full_destination_path = filename
    else:
        full_destination_path = os.path.join(destination, filename)
    full_destination_path = expand_filepath(full_destination_path)
    with open(full_destination_path, "wb") as f:
        # BUG FIX: previously iterated with the global CHUNK_SIZE,
        # silently ignoring the `chunksize` argument.
        for chunk in tqdm(response.iter_content(chunksize)):
            if chunk:
                f.write(chunk)
    return full_destination_path
For streaming response from requests, download the content one CHUNK at a time
def makeCredentials(path, email):
    """Make client credentials for the given e-mail address and store
    them in the directory at ``path``.

    Generates a key pair and a certificate and writes both, PEM-encoded,
    into ``client.pem`` under ``path``.
    """
    key = _generateKey()
    cert = _makeCertificate(key, email)
    certPath = path.child("client.pem")
    certPath.alwaysCreate = True
    # Private key and certificate share one PEM file.
    with certPath.open("wb") as pemFile:
        pemFile.write(dump_privatekey(FILETYPE_PEM, key))
        pemFile.write(dump_certificate(FILETYPE_PEM, cert))
Make credentials for the client from given e-mail address and store them in the directory at path.
def use_value(self, value):
    """Return ``value`` unchanged when it already passes ``check_value``,
    otherwise convert it to the field's type."""
    return value if self.check_value(value) else self.convert_value(value)
Converts value to field type or use original
def get_arbiter_broks(self):
    """Move the broks received from the arbiters into the external
    broks list.

    The ``arbiter_broks`` list can be pushed to by the arbiter, so the
    whole transfer is done under ``arbiter_broks_lock``.

    :return: None
    """
    with self.arbiter_broks_lock:
        statsmgr.gauge('get-new-broks-count.arbiter', len(self.arbiter_broks))
        self.external_broks.extend(self.arbiter_broks)
        # Reset under the lock so concurrently pushed broks are not lost.
        self.arbiter_broks = []
Get the broks from the arbiters, but as the arbiter_broks list can be push by arbiter without Global lock, we must protect this with a lock TODO: really? check this arbiter behavior! :return: None
def init_nautilus(method):
    """Initialize the nautilus method interactively.

    Prompts the user for the preference elicitation style and the number
    of iterations, updating ``method`` in place.

    Parameters
    ----------
    method
        Interactive method used for the process

    Returns
    -------
    PreferenceInformation subclass to be initialized
    """
    print("Preference elicitation options:")
    print("\t1 - Percentages")
    print("\t2 - Relative ranks")
    print("\t3 - Direct")
    # Index into this list is the user's 1-based menu choice minus one.
    PREFCLASSES = [PercentageSpecifictation, RelativeRanking, DirectSpecification]
    pref_sel = int(
        _prompt_wrapper(
            "Reference elicitation ",
            default=u"%s" % (1),
            validator=NumberValidator([1, 3]),
        )
    )
    preference_class = PREFCLASSES[pref_sel - 1]
    print("Nadir: %s" % method.problem.nadir)
    print("Ideal: %s" % method.problem.ideal)
    # NOTE(review): when current_iter != user_iters this computes
    # user_iters - current_iter as the number of finished iterations,
    # which may be negative - confirm the intended bookkeeping.
    if method.current_iter - method.user_iters:
        finished_iter = method.user_iters - method.current_iter
    else:
        finished_iter = 0
    new_iters = int(
        _prompt_wrapper(
            u"Ni: ", default=u"%s" % (method.current_iter), validator=NumberValidator()
        )
    )
    method.current_iter = new_iters
    method.user_iters = finished_iter + new_iters
    return preference_class
Initialize nautilus method Parameters ---------- method Interactive method used for the process Returns ------- PreferenceInformation subclass to be initialized
def handle_memory(self, obj):
    """Handle a memory event.

    Writes a record with state information and an optional note to the
    internal database; events without a subject are ignored.

    :param obj: A :py:class:`~turberfield.dialogue.model.Model.Memory`
        object.
    :return: The supplied object.
    """
    if obj.subject is not None:
        with self.con as db:
            SchemaBase.note(
                db,
                obj.subject,
                obj.state,
                obj.object,
                text=obj.text,
                html=obj.html,
            )
    return obj
Handle a memory event. This function accesses the internal database. It writes a record containing state information and an optional note. :param obj: A :py:class:`~turberfield.dialogue.model.Model.Memory` object. :return: The supplied object.
def load(path):
    """Load ``path`` and recursively expand any includes."""
    with open(path) as handle:
        steps = MODEL.parse(handle.read())
    expanded = []
    for step in steps:
        expanded.extend(expand_includes(step, path))
    return expanded
Load |path| and recursively expand any includes.
def is_result_edition_allowed(self, analysis_brain):
    """Check whether editing the result field is allowed.

    :param analysis_brain: Brain that represents an analysis
    :return: True if the user can edit the result field, otherwise False
    """
    # The analysis itself must be editable first.
    if not self.is_analysis_edition_allowed(analysis_brain):
        return False
    analysis = api.get_object(analysis_brain)
    # Without a detection-limit operand the result is freely editable.
    if not analysis.getDetectionLimitOperand():
        return True
    # With a detection-limit selector, manual detection limits must be
    # explicitly allowed.
    if analysis.getDetectionLimitSelector():
        return bool(analysis.getAllowManualDetectionLimit())
    return True
Checks if the edition of the result field is allowed :param analysis_brain: Brain that represents an analysis :return: True if the user can edit the result field, otherwise False
def set_progress_brackets(self, start, end):
    """Set the bracket characters drawn around a progress bar."""
    self.sep_start, self.sep_end = start, end
Set brackets to set around a progress bar.