code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def resources_gc_prefix(options, policy_config, policy_collection):
    """Garbage collect old custodian policy resources, region by region.

    Poll-mode policies are skipped (they have no event sources to clean
    up); the rest are bucketed by region, and region_gc runs once per
    region returned by get_gc_regions.
    """
    by_region = {}
    for policy in policy_collection:
        if policy.execution_mode == 'poll':
            continue
        by_region.setdefault(policy.options.region, []).append(policy)
    for region in get_gc_regions(options.regions):
        region_gc(options, region, policy_config, by_region.get(region, []))
Garbage collect old custodian policies based on prefix. We attempt to introspect each policy to find its event sources, but without the old configuration this information is only implicit.
def sample_grid(self, count=None, step=None):
    """Return a (n, 3) float array of points contained by the box.

    Exactly one of `count` or `step` may be given: `count` spaces
    samples with np.linspace, `step` with np.arange.
    """
    if count is not None and step is not None:
        raise ValueError('only step OR count can be specified!')
    # the box is centered at the origin; extents span +/- half per axis
    bounds = np.array([-self.primitive.extents,
                       self.primitive.extents]) * .5
    if step is not None:
        grid = util.grid_arange(bounds, step=step)
    elif count is not None:
        grid = util.grid_linspace(bounds, count=count)
    else:
        raise ValueError('either count or step must be specified!')
    # move grid points from the unit-box frame into the primitive frame
    return transformations.transform_points(
        grid, matrix=self.primitive.transform)
Return a 3D grid which is contained by the box. Samples are either 'step' distance apart, or there are 'count' samples per box side. Parameters ----------- count : int or (3,) int If specified samples are spaced with np.linspace step : float or (3,) float If specified samples are spaced with np.arange Returns ----------- grid : (n, 3) float Points inside the box
def setup(self, interval): self.trace_counter = 0 self._halt = False self.interval = interval
Prepare the tests for execution; `interval` is given in milliseconds.
def format_energy_results(energy):
    """Flatten an energy measurement into an ordered result dict.

    Whole-package readings become ``cpuenergy-pkg<n>`` and are summed
    into an overall ``cpuenergy``; other domains become
    ``cpuenergy-pkg<n>-<domain>``.
    """
    if not energy:
        return {}
    flat = {}
    total = Decimal(0)
    for pkg, domains in energy.items():
        for domain, value in domains.items():
            if domain == DOMAIN_PACKAGE:
                # package-level reading contributes to the overall total
                total += value
                flat['cpuenergy-pkg{}'.format(pkg)] = value
            else:
                flat['cpuenergy-pkg{}-{}'.format(pkg, domain)] = value
    flat['cpuenergy'] = total
    return collections.OrderedDict(sorted(flat.items()))
Take the result of an energy measurement and return a flat dictionary that contains all values.
def get_user_info(self):
    """Fetch info for the logged-in user from the mobile API.

    Returns:
        Info: object built from the JSON response.
    """
    url = urljoin(self.base_url, '/api/mobile/v0.5/my_user_info')
    response = self.requester.get(url)
    response.raise_for_status()
    return Info(response.json())
Returns a UserInfo object for the logged-in user. Returns: UserInfo: object representing the student's current grades
def matches_querytime(instance, querytime):
    """Check whether `instance` is visible under the given QueryTime.

    An inactive querytime matches everything; an active one without a
    time matches only still-open versions; otherwise the instance's
    validity interval must contain the query time.
    """
    if not querytime.active:
        return True
    if not querytime.time:
        # "current" query: only versions that have not been closed
        return instance.version_end_date is None
    started = instance.version_start_date <= querytime.time
    still_open = (instance.version_end_date is None
                  or instance.version_end_date > querytime.time)
    return started and still_open
Checks whether the given instance satisfies the given QueryTime object. :param instance: an instance of Versionable :param querytime: QueryTime value to check against
def resource(resource_id):
    """Show a resource page, or the raw file when '?raw' is present."""
    res = app.db.resource(resource_id)
    if 'raw' in request.args:
        directory, filename = os.path.split(res.path)
        return send_from_directory(directory, filename)
    return render_template('resource.html', resource=res)
Show a resource.
def query(
    self,
    query,
    job_config=None,
    job_id=None,
    job_id_prefix=None,
    location=None,
    project=None,
    retry=DEFAULT_RETRY,
):
    """Run a SQL query as a new QueryJob and begin it immediately.

    Falls back to the client's project/location when not supplied and
    merges `job_config` with the client's default query job config.

    Returns:
        google.cloud.bigquery.job.QueryJob: the started job.
    """
    job_id = _make_job_id(job_id, job_id_prefix)
    project = self.project if project is None else project
    location = self.location if location is None else location
    default_config = self._default_query_job_config
    if default_config:
        if job_config:
            # explicit options win; unset ones come from the default
            job_config = job_config._fill_from_default(default_config)
        else:
            job_config = default_config
    job_ref = job._JobReference(job_id, project=project, location=location)
    query_job = job.QueryJob(job_ref, query, client=self,
                             job_config=job_config)
    query_job._begin(retry=retry)
    return query_job
Run a SQL query. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query Arguments: query (str): SQL query to be executed. Defaults to the standard SQL dialect. Use the ``job_config`` parameter to change dialects. Keyword Arguments: job_config (google.cloud.bigquery.job.QueryJobConfig): (Optional) Extra configuration options for the job. To override any options that were previously set in the ``default_query_job_config`` given to the ``Client`` constructor, manually set those options to ``None``, or whatever value is preferred. job_id (str): (Optional) ID to use for the query job. job_id_prefix (str): (Optional) The prefix to use for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the any table used in the query as well as the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.QueryJob: A new query job instance.
def check_input_sample(input_sample, num_params, num_samples): assert type(input_sample) == np.ndarray, \ "Input sample is not an numpy array" assert input_sample.shape[0] == (num_params + 1) * num_samples, \ "Input sample does not match number of parameters or groups" assert np.any((input_sample >= 0) | (input_sample <= 1)), \ "Input sample must be scaled between 0 and 1"
Check the `input_sample` is valid Checks input sample is: - the correct size - values between 0 and 1 Arguments --------- input_sample : numpy.ndarray num_params : int num_samples : int
async def do_upload(context, files):
    """Upload artifacts, translating known failures into an exit status.

    ScriptWorkerException and aiohttp client errors are logged and
    folded into the returned status; anything else is logged and
    re-raised.

    Returns:
        int: worst exit status seen (0 on success).
    """
    status = 0
    try:
        await upload_artifacts(context, files)
    except ScriptWorkerException as exc:
        status = worst_level(status, exc.exit_code)
        log.error("Hit ScriptWorkerException: {}".format(exc))
    except aiohttp.ClientError as exc:
        status = worst_level(status, STATUSES['intermittent-task'])
        log.error("Hit aiohttp error: {}".format(exc))
    except Exception as exc:
        log.exception("SCRIPTWORKER_UNEXPECTED_EXCEPTION upload {}".format(exc))
        raise
    return status
Upload artifacts and return status. Returns the integer status of the upload. args: context (scriptworker.context.Context): the scriptworker context. files (list of str): list of files to be uploaded as artifacts Raises: Exception: on unexpected exception. Returns: int: exit status
def edit(args):
    """Update a DbTap from parsed CLI arguments.

    Only options explicitly provided (not None) are sent to the edit
    call. Fixes the `not args.name is None` anti-idiom and replaces the
    repetitive if-chain with a data-driven loop.

    Returns:
        str: the updated tap attributes as pretty-printed JSON.
    """
    tap = DbTap.find(args.id)
    # edit-API keyword -> CLI argument attribute
    arg_map = (("db_name", "name"),
               ("db_host", "host"),
               ("db_user", "user"),
               ("db_passwd", "password"),
               ("db_type", "type"),
               ("db_location", "location"),
               ("port", "port"))
    options = {}
    for option_key, attr in arg_map:
        value = getattr(args, attr)
        if value is not None:
            options[option_key] = value
    tap = tap.edit(**options)
    return json.dumps(tap.attributes, sort_keys=True, indent=4)
Carefully set up a dict of edit options from the provided arguments
def simple_profile(self, sex=None):
    """Generate a basic profile dictionary with personal information.

    `sex` may be 'F' or 'M'; anything else is replaced by a random
    choice between the two, and the name is generated to match.
    """
    valid_sexes = ["F", "M"]
    if sex not in valid_sexes:
        sex = self.random_element(valid_sexes)
    name = (self.generator.name_female() if sex == 'F'
            else self.generator.name_male())
    return {
        "username": self.generator.user_name(),
        "name": name,
        "sex": sex,
        "address": self.generator.address(),
        "mail": self.generator.free_email(),
        "birthdate": self.generator.date_of_birth(),
    }
Generates a basic profile with personal information
def search(self):
    """Build the base search, filtered to documents matching any of this topic's tags."""
    base = super(TopicSearchMixin, self).search()
    tag_queries = [Q('term', tags=tag) for tag in self.topic.tags]
    return base.filter('bool', should=tag_queries)
Override search to match on topic tags
def is_color_supported():
    "Find out if your terminal environment supports color."
    stdout = sys.stdout
    if not hasattr(stdout, 'isatty'):
        return False
    if not stdout.isatty() and 'TERMINAL-COLOR' not in os.environ:
        return False
    if sys.platform == 'win32':
        # Windows consoles need colorama to interpret ANSI escapes
        try:
            import colorama
        except ImportError:
            return False
        colorama.init()
        return True
    if 'COLORTERM' in os.environ:
        return True
    term = os.environ.get('TERM', 'dumb').lower()
    return term in ('xterm', 'linux') or 'color' in term
Find out if your terminal environment supports color.
def _call(self, method, url, params, uploads): try: data = self._request(method, url, params, uploads) except Exception, e: self._failed_cb(e) else: self._completed_cb(data)
Initiate a request to the server and handle the outcome.
def _build_query_params(self, headers_only=False, page_size=None):
    """Return key-value pairs for the list_time_series API call.

    :type headers_only: bool
    :param headers_only: Whether to omit point data from the results.

    :type page_size: int
    :param page_size: (Optional) Maximum number of points per page;
        non-positive values are ignored by the API.
    """
    params = {"name": self._project_path, "filter_": self.filter}
    params["interval"] = types.TimeInterval()
    params["interval"].end_time.FromDatetime(self._end_time)
    if self._start_time:
        params["interval"].start_time.FromDatetime(self._start_time)
    # only request server-side aggregation when at least one
    # aggregation option has been configured on this query
    if (
        self._per_series_aligner
        or self._alignment_period_seconds
        or self._cross_series_reducer
        or self._group_by_fields
    ):
        params["aggregation"] = types.Aggregation(
            per_series_aligner=self._per_series_aligner,
            cross_series_reducer=self._cross_series_reducer,
            group_by_fields=self._group_by_fields,
            alignment_period={"seconds": self._alignment_period_seconds},
        )
    if headers_only:
        params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS
    else:
        params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
    if page_size is not None:
        params["page_size"] = page_size
    return params
Return key-value pairs for the list_time_series API call. :type headers_only: bool :param headers_only: Whether to omit the point data from the :class:`~google.cloud.monitoring_v3.types.TimeSeries` objects. :type page_size: int :param page_size: (Optional) The maximum number of points in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API.
def login_server(self):
    """Open an interactive SSH session to the configured server."""
    command = 'ssh -i {0} {1}@{2}'.format(
        env.key_filename, env.user, env.host_string)
    local(command)
Login to server
def update_object(self, form, obj):
    """Save the edited field value onto `obj`; return a JSON success response."""
    field_name = form.cleaned_data['name']
    setattr(obj, field_name, form.cleaned_data['value'])
    save_kwargs = {}
    if CAN_UPDATE_FIELDS:
        # restrict the UPDATE to the single edited column when supported
        save_kwargs['update_fields'] = [field_name]
    obj.save(**save_kwargs)
    payload = json.dumps({
        'status': 'success',
    })
    return HttpResponse(payload, content_type="application/json")
Saves the new value to the target object.
def _find_solo_consonant(self, letters: List[str]) -> List[int]: solos = [] for idx, letter in enumerate(letters): if len(letter) == 1 and self._contains_consonants(letter): solos.append(idx) return solos
Find the positions of any solo consonants that are not yet paired with a vowel.
def settings_view_decorator(function):
    """Wrap `function` in the decorator from settings, if one is set.

    ``CLOUD_BROWSER_VIEW_DECORATOR`` may be a callable or a
    fully-qualified dotted-path string, which is lazily imported here.
    """
    decorator = settings.CLOUD_BROWSER_VIEW_DECORATOR
    if isinstance(decorator, str):
        module_name, _, attr_name = decorator.rpartition('.')
        if not module_name or not attr_name:
            raise ImportError("Unable to import module: %s" % module_name)
        module = import_module(module_name)
        if not hasattr(module, attr_name):
            raise ImportError("Unable to import decorator: %s" % decorator)
        decorator = getattr(module, attr_name)
    if decorator and callable(decorator):
        return decorator(function)
    return function
Insert decorator from settings, if any. .. note:: Decorator in ``CLOUD_BROWSER_VIEW_DECORATOR`` can be either a callable or a fully-qualified string path (the latter, which we'll lazy import).
def write_ensemble(ensemble, options):
    """Write the ensemble composition to '<outname>_<size>_queries.csv'.

    The file is created in the current working directory and contains
    the ensemble members joined by ', '.
    """
    size = len(ensemble)
    filename = '%s_%s_queries.csv' % (options.outname, size)
    filepath = os.path.join(os.getcwd(), filename)
    # FIX: use a context manager so the file is closed even on write
    # errors, and stop shadowing the `file` builtin
    with open(filepath, 'w') as out_file:
        out_file.write(', '.join(ensemble))
Prints out the ensemble composition at each size
def disassociate_public_ip(self, public_ip_id):
    """Detach a floating (external) IP from the server it is bound to."""
    ip_info = self.client.floating_ips.get(public_ip_id).to_dict()
    self.client.servers.remove_floating_ip(
        ip_info.get('instance_id'), ip_info.get('ip'))
    return True
Disassociate an external IP
def split_into_batches(input_list, batch_size, batch_storage_dir, checkpoint=False):
    """Break `input_list` into batches of at most `batch_size` items.

    Each batch dict carries its index, its data slice, and the input /
    result checkpoint filenames under `batch_storage_dir`. When
    `checkpoint` is true the directory is created if missing and every
    batch's data is saved to its input file.

    Returns:
        list of dicts with keys 'index', 'data', 'input_filename',
        'result_filename'.
    """
    if checkpoint and not os.path.exists(batch_storage_dir):
        os.mkdir(batch_storage_dir)
    batches = []
    for index, start in enumerate(range(0, len(input_list), batch_size)):
        name_template = 'batch-{:05d}-{}.pickle'
        batches.append({
            'index': index,
            'data': input_list[start:start + batch_size],
            'input_filename': os.path.join(
                batch_storage_dir, name_template.format(index, 'input')),
            'result_filename': os.path.join(
                batch_storage_dir, name_template.format(index, 'output')),
        })
    if checkpoint:
        for batch in batches:
            save(batch['data'], batch['input_filename'])
    return batches
Break the input data into smaller batches, optionally saving each one to disk. Args: input_list: An input object that has a list-like interface (indexing and slicing). batch_size: The maximum number of input items in each batch. batch_storage_dir: The directory to save the checkpoints to. checkpoint: Whether to save each batch to a file. Returns: A list of batch objects with the following structure: {'index', 'data', 'input_filename', 'result_filename'}
def decode_wireformat_uuid(rawguid):
    """Decode a wire format UUID to its canonical string form.

    Handles the scheme where the first three fields are little endian
    and the rest big endian; output matches what dmidecode would print.

    :param rawguid: 16 bytes as bytes, bytearray, or list of ints.
    """
    if isinstance(rawguid, list):
        rawguid = bytearray(rawguid)
    # BUG FIX: `buffer()` is a Python 2-only builtin;
    # struct.unpack_from accepts bytes/bytearray slices directly.
    lebytes = struct.unpack_from('<IHH', bytes(rawguid[:8]))
    bebytes = struct.unpack_from('>HHI', bytes(rawguid[8:]))
    return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format(
        lebytes[0], lebytes[1], lebytes[2],
        bebytes[0], bebytes[1], bebytes[2])
Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output.
def as_list(self):
    """Return [name, value, children] with children recursively listed.

    BUG FIX: the original built ``[x.as_list for x in self.children]``
    (no call), which returned bound methods instead of the children's
    nested lists promised by the docstring.
    """
    return [self.name, self.value, [child.as_list() for child in self.children]]
Return all child objects in nested lists of strings.
def add_camera_make_model(self, make, model):
    """Record the camera make and model in the EXIF 0th IFD."""
    zeroth_ifd = self._ef['0th']
    zeroth_ifd[piexif.ImageIFD.Make] = make
    zeroth_ifd[piexif.ImageIFD.Model] = model
Add camera make and model.
def todo(self):
    """Scan the migration directory and return sorted migration names.

    Creates the directory when missing (yielding no migrations) and
    strips the trailing 3 characters ('.py') from matching filenames.
    """
    if not os.path.exists(self.migrate_dir):
        # FIX: `Logger.warn` is a deprecated alias; use `warning`
        self.logger.warning('Migration directory: %s does not exist.',
                            self.migrate_dir)
        os.makedirs(self.migrate_dir)
    return sorted(f[:-3] for f in os.listdir(self.migrate_dir)
                  if self.filemask.match(f))
Scan migrations in file system.
def searchAccount(searchTerm, book):
    """Print every account whose full name contains `searchTerm` (case-insensitive)."""
    print("Search results:\n")
    matches = [account.fullname for account in book.accounts
               if searchTerm.lower() in account.fullname.lower()]
    for fullname in matches:
        print(fullname)
    if not matches:
        print("Search term not found in account names.")
Searches through account names
def handle_sketch_name(msg):
    """Store an internal sketch-name message on its sensor and alert.

    Messages from unknown nodes are ignored. Always returns None.
    """
    gateway = msg.gateway
    if not gateway.is_sensor(msg.node_id):
        return None
    gateway.sensors[msg.node_id].sketch_name = msg.payload
    gateway.alert(msg)
    return None
Process an internal sketch name message.
def do_GET(self):
    """Handle a HTTP GET request.

    Three cases: an OTP-decrypt request under `serve_url`, a stats dump
    at `stats_url`, and 403 Forbidden for anything else.
    """
    if self.path.startswith(self.serve_url):
        # everything after the serve prefix is the OTP to decrypt
        from_key = self.path[len(self.serve_url):]
        val_res = self.decrypt_yubikey_otp(from_key)
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(val_res)
        self.wfile.write("\n")
    elif self.stats_url and self.path == self.stats_url:
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # `stats` is presumably a module-level counter dict (int values,
        # given the %d) — TODO confirm against the rest of the module
        for key in stats:
            self.wfile.write("%s %d\n" % (key, stats[key]))
    else:
        self.log_error("Bad URL '%s' - I'm serving '%s' (responding 403)" % (self.path, self.serve_url))
        self.send_response(403, 'Forbidden')
        self.end_headers()
Handle a HTTP GET request.
def _assert_all_loadable_terms_specialized_to(self, domain):
    """Assert every LoadableTerm in the graph is bound to `domain`."""
    loadable_terms = (t for t in self.graph.node
                      if isinstance(t, LoadableTerm))
    for term in loadable_terms:
        assert term.domain is domain
Make sure that we've specialized all loadable terms in the graph.
def api_call(self, opts, args=None, body=None, **kwargs):
    """Build the /api/v1 path from `opts` and dispatch the request.

    `opts['name']` may contain %-placeholders filled from `args`.
    """
    endpoint = opts['name'] % args if args else opts['name']
    return self._request(
        opts['method'], path='/api/v1%s' % endpoint, payload=body, **kwargs)
Setup the request
def issuperset(self, other):
    """Report whether this set contains every element of `other`.

    A quick length comparison short-circuits the common failure case
    before any membership tests.

    Example:
        >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
        False
        >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
        True
    """
    if len(self) < len(other):
        return False
    for item in other:
        if item not in self:
            return False
    return True
Report whether this set contains another set. Example: >>> OrderedSet([1, 2]).issuperset([1, 2, 3]) False >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3}) True >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3}) False
def _format_issue_url(self): query = urlencode({ 'title': self._format_issue_title(), 'body': self._format_issue_body(), }) return self.REPO_URL + self.ISSUE_SUFFIX + '?' + query
Format full issue URL.
def convert_units(values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation, **kwargs):
    """Convert one value or a sequence of values between units.

    A scalar input is wrapped in a list first, so the result is always
    a list.
    """
    if numpy.isscalar(values):
        values = [values]
    as_floats = [float(value) for value in values]
    return convert(as_floats,
                   source_measure_or_unit_abbreviation,
                   target_measure_or_unit_abbreviation)
Convert a value from one unit to another one. Example:: >>> cli = PluginLib.connect() >>> cli.service.convert_units(20.0, 'm', 'km') 0.02 Parameters: values: single measure or an array of measures source_measure_or_unit_abbreviation: A measure in the source unit, or just the abbreviation of the source unit, from which convert the provided measure value/values target_measure_or_unit_abbreviation: A measure in the target unit, or just the abbreviation of the target unit, into which convert the provided measure value/values Returns: Always a list
def recompute(self, quiet=False, **kwargs):
    """Re-run `compute` with the inputs cached by a previous call.

    :param quiet: (optional)
        If True, return False when the computation fails instead of
        raising. (default: False)
    """
    if self.computed:
        return True
    if not (hasattr(self, "_x") and hasattr(self, "_yerr2")):
        raise RuntimeError("You need to compute the model first")
    try:
        self.compute(self._x, np.sqrt(self._yerr2), **kwargs)
    except (ValueError, LinAlgError):
        if not quiet:
            raise
        return False
    return True
Re-compute a previously computed model. You might want to do this if the kernel parameters change and the kernel is labeled as ``dirty``. :param quiet: (optional) If ``True``, return false when the computation fails. Otherwise, throw an error if something goes wrong. (default: ``False``)
def redirect(location=None, internal=False, code=None, headers=None,
             add_slash=False, request=None):
    """Perform a redirect, either internal or external.

    An internal redirect performs the redirect server-side; an external
    one uses an HTTP 302 status code by default.

    :param location: The HTTP location to redirect to.
    :param internal: Whether the redirect should be internal.
    :param code: HTTP status code for external redirects (default 302).
    :param headers: Optional dict of HTTP headers for the response.
    :param add_slash: Redirect to the current/given URL with a trailing slash.
    :param request: The :class:`pecan.Request` instance to use.
    """
    request = request or state.request
    if add_slash:
        if location is None:
            split_url = list(urlparse.urlsplit(request.url))
            # honor the proxy-forwarded protocol when behind a proxy
            split_url[0] = request.environ.get(
                'HTTP_X_FORWARDED_PROTO', split_url[0])
        else:
            # BUG FIX: urlsplit returns an immutable SplitResult; the
            # original assigned into it and raised TypeError
            split_url = list(urlparse.urlsplit(location))
        split_url[2] = split_url[2].rstrip('/') + '/'
        location = urlparse.urlunsplit(split_url)
    # BUG FIX: `headers={}` was a shared mutable default argument
    if not headers:
        headers = {}
    if internal:
        if code is not None:
            raise ValueError('Cannot specify a code for internal redirects')
        request.environ['pecan.recursive.context'] = request.context
        raise ForwardRequestException(location)
    if code is None:
        code = 302
    raise exc.status_map[code](location=location, headers=headers)
Perform a redirect, either internal or external. An internal redirect performs the redirect server-side, while the external redirect utilizes an HTTP 302 status code. :param location: The HTTP location to redirect to. :param internal: A boolean indicating whether the redirect should be internal. :param code: The HTTP status code to use for the redirect. Defaults to 302. :param headers: Any HTTP headers to send with the response, as a dictionary. :param request: The :class:`pecan.Request` instance to use.
def notifications(self):
    """Return the Notifications resource available for this user.

    Notifications are events that need the user's attention (group join
    applications, membership acceptances, and so on).

    FIX: removed the unused local ``params = {"f": "json"}``.
    """
    url = "%s/notifications" % self.root
    return Notifications(url=url,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
The notifications that are available for the given user. Notifications are events that need the user's attention: an application for joining a group administered by the user, acceptance of a group membership application, and so on. A notification is initially marked as new. The user can mark it as read or delete the notification.
def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Insert `size` bytes of empty space starting at `offset`.

    `fobj` must be an open file object, opened rb+ or equivalent. An
    mmap-based move is tried first, falling back to a significantly
    slower buffered copy when mmap is unavailable or fails.

    Args:
        fobj (fileobj)
        size (int): the amount of space to insert
        offset (int): the offset at which to insert the space
        BUFFER_SIZE (int): chunk size for the fallback copy

    Raises:
        ValueError: if size/offset are negative or offset is past EOF
        IOError
    """
    if size < 0 or offset < 0:
        raise ValueError
    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset
    if movesize < 0:
        raise ValueError
    # grow the file first so the tail can be shifted right by `size`
    resize_file(fobj, size, BUFFER_SIZE)
    if mmap is not None:
        try:
            mmap_move(fobj, offset + size, offset, movesize)
        except mmap.error:
            # mmap can fail (e.g. address space exhausted); fall back
            fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
    else:
        fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE)
Insert size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or equivalent. Mutagen tries to use mmap to resize the file, but falls back to a significantly slower method if mmap fails. Args: fobj (fileobj) size (int): The amount of space to insert offset (int): The offset at which to insert the space Raises: IOError
def _setFlags(self): self.atEnd = not self.gametree.variations and (self.index + 1 == len(self.gametree)) self.atStart = not self.stack and (self.index == 0)
Sets up the flags 'self.atEnd' and 'self.atStart'.
def add_tree(self, tree, parent=None):
    """Add `tree` (and, recursively, its sub-trees) into the database.

    Args:
        tree (obj): :class:`.Tree` instance.
        parent (ref, default None): reference to the parent tree; set
            for the recursive sub-tree calls.
    """
    # re-adding a tree replaces the previously stored version
    if tree.path in self.path_db:
        self.remove_tree_by_path(tree.path)
    # index the tree under every index attribute it actually has set
    for index in tree.indexes:
        if not getattr(tree, index):
            continue
        self._add_to(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )
    if parent:
        self._add_to(self.parent_db, tree.path, parent)
    # every sub-tree path must live under the parent's path
    for sub_tree in tree.sub_trees:
        assert sub_tree.path.startswith(tree.path)
    for sub_tree in tree.sub_trees:
        self.add_tree(sub_tree, parent=tree)
Add `tree` into database. Args: tree (obj): :class:`.Tree` instance. parent (ref, default None): Reference to parent tree. This is used for all sub-trees in recursive call.
def update(self, num):
    """Fold a new sample into the running metrics (Welford's method)."""
    value = float(num)
    self.count += 1
    self.low = min(self.low, value)
    self.high = max(self.high, value)
    delta = value - self.mean
    self.mean = self.mean + delta / self.count
    # second delta uses the *updated* mean (Welford numerical stability)
    self._rolling_variance = self._rolling_variance + delta * (value - self.mean)
    if self.count > 1:
        self.deviation = math.sqrt(self._rolling_variance / (self.count - 1))
    else:
        self.deviation = 0.0
Update metrics with the new number.
def _get_deps(self, tree, include_punct, representation, universal):
    """Get a list of dependencies from a Stanford Tree for a specific
    Stanford Dependencies representation.

    `representation` must be one of 'basic', 'collapsed',
    'CCprocessed', or 'collapsedTree'.
    """
    if universal:
        converter = self.universal_converter
        # when the jar has no separate universal converter, both
        # attributes point at the same callable — warn and fall back
        if self.universal_converter == self.converter:
            import warnings
            warnings.warn("This jar doesn't support universal "
                          "dependencies, falling back to Stanford "
                          "Dependencies. To suppress this message, "
                          "call with universal=False")
    else:
        converter = self.converter
    if include_punct:
        egs = converter(tree, self.acceptFilter)
    else:
        egs = converter(tree)
    # dispatch on the requested representation
    if representation == 'basic':
        deps = egs.typedDependencies()
    elif representation == 'collapsed':
        deps = egs.typedDependenciesCollapsed(True)
    elif representation == 'CCprocessed':
        deps = egs.typedDependenciesCCprocessed(True)
    else:
        assert representation == 'collapsedTree'
        deps = egs.typedDependenciesCollapsedTree()
    return self._listify(deps)
Get a list of dependencies from a Stanford Tree for a specific Stanford Dependencies representation.
def _on_closed(self):
    """Invoked when the Redis connection closes; reset connection state."""
    LOGGER.error('Redis connection closed')
    self.connected = False
    self._on_close()
    self._stream = None
Invoked when the connection is closed
def reset(self):
    """Reset this Layout and the Widgets it contains."""
    self.update_widgets()
    # reset and un-focus every widget in every column
    all_widgets = [w for column in self._columns for w in column]
    for widget in all_widgets:
        widget.reset()
        widget.blur()
    self._live_widget = -1
    self._find_next_widget(1)
Reset this Layout and the Widgets it contains.
def on_nick(self, connection, event):
    """Handle a nick change: update state and notify WebSocket clients."""
    old_nickname = self.get_nickname(event)
    old_color = self.nicknames.pop(old_nickname)
    new_nickname = event.target()
    # announce the change under the old identity
    self.namespace.emit("message", old_nickname,
                        "is now known as %s" % new_nickname, old_color)
    self.nicknames[new_nickname] = color(new_nickname)
    self.emit_nicknames()
    if self.nickname == old_nickname:
        # it was our own nick that changed — keep tracking it
        self.nickname = new_nickname
Someone changed their nickname - send the nicknames list to the WebSocket.
def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):
    """Create BucketIterators for the train/val/test splits of SST.

    Vocabularies are built from the training split only; remaining
    keyword arguments are passed through to `splits`.
    """
    text_field = data.Field()
    label_field = data.Field(sequential=False)
    train, val, test = cls.splits(text_field, label_field, root=root, **kwargs)
    text_field.build_vocab(train, vectors=vectors)
    label_field.build_vocab(train)
    return data.BucketIterator.splits(
        (train, val, test), batch_size=batch_size, device=device)
Create iterator objects for splits of the SST dataset. Arguments: batch_size: Batch_size device: Device to create batches on. Use - 1 for CPU and None for the currently active GPU device. root: The root directory that the dataset's zip archive will be expanded into; therefore the directory in whose trees subdirectory the data files will be stored. vectors: one of the available pretrained vectors or a list with each element one of the available pretrained vectors (see Vocab.load_vectors) Remaining keyword arguments: Passed to the splits method.
def a2bits_list(chars: str, encoding: str = "UTF-8") -> List[str]:
    """Convert each character of `chars` to a zero-padded bit string.

    The pad width is taken from the ENCODINGS table for `encoding`.

    >>> a2bits_list("Hi")
    ['01001000', '01101001']
    """
    width = ENCODINGS[encoding]
    return [format(ord(char), "b").zfill(width) for char in chars]
Convert a string to its bits representation as a list of 0's and 1's. >>> a2bits_list("Hello World!") ['01001000', '01100101', '01101100', '01101100', '01101111', '00100000', '01010111', '01101111', '01110010', '01101100', '01100100', '00100001'] >>> "".join(a2bits_list("Hello World!")) '010010000110010101101100011011000110111100100000010101110110111101110010011011000110010000100001'
def tar_extract(cls, tar_comp_file_path):
    """Extract a tar.gz or tar.bz2 archive into the current directory.

    Behaves like ``tar xzf`` / ``tar xjf``; a broken archive
    (tarfile.ReadError) is re-raised as InstallError.
    """
    try:
        with contextlib.closing(tarfile.open(tar_comp_file_path)) as tar:
            tar.extractall()
    except tarfile.ReadError as err:
        raise InstallError(
            'Extract failed: tar_comp_file_path: {0}, reason: {1}'.format(
                tar_comp_file_path, err))
Extract tar.gz or tar bz2 file. It behaves like - tar xzf tar_gz_file_path - tar xjf tar_bz2_file_path It raises InstallError (wrapping tarfile.ReadError) if the file is broken.
def name(self, name):
    """Rename this enum member.

    The new name must be globally unique in the IDB (member names
    cannot appear in other enums or anywhere else).
    """
    if not idaapi.set_enum_member_name(self.cid, name):
        raise exceptions.CantRenameEnumMember(
            "Failed renaming {!r} to {!r}. Does the name exist somewhere else?".format(self.name, name))
Set the member name. Note that a member name cannot appear in other enums, or generally anywhere else in the IDB.
def profile_cancel(self, query_id, timeout=10):
    """Cancel the query with the given Drill query UUID.

    :param query_id: UUID Drill assigned to the query.
    :param timeout: request timeout in seconds.
    :return: pydrill.client.Result
    """
    response = self.perform_request(
        method='GET',
        url='/profiles/cancel/{0}'.format(query_id),
        params={'request_timeout': timeout},
    )
    return Result(*response)
Cancel the query that has the given queryid. :param query_id: The UUID of the query in standard UUID format that Drill assigns to each query. :param timeout: int :return: pydrill.client.Result
def statuses(self):
    """Fetch all status Resources from the server.

    :rtype: List[Status]
    """
    return [Status(self._options, self._session, raw)
            for raw in self._get_json('status')]
Get a list of status Resources from the server. :rtype: List[Status]
def _factory(slice_, axis, weighted):
    """Pick the PairwiseSignificance subclass for the slice's row dimension type."""
    is_mr_rows = slice_.dim_types[0] == DT.MR_SUBVAR
    cls = (_MrXCatPairwiseSignificance if is_mr_rows
           else _CatXCatPairwiseSignificance)
    return cls(slice_, axis, weighted)
return subclass for PairwiseSignificance, based on slice dimension types.
def load_from_file(cls, filename_prefix):
    """Build an instance from the subword list stored in a vocab file.

    Each stored line wraps the subword in a pair of delimiter
    characters, which are stripped off here.
    """
    lines, _ = cls._read_lines_from_file(cls._filename(filename_prefix))
    return cls(vocab_list=[line[1:-1] for line in lines])
Extracts list of subwords from file.
def extract_bzip2(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a BZIP2 archive with the bz2 Python module.

    Writes the single decompressed output file into `outdir`; any
    failure is wrapped in a PatoolError. Returns None.
    """
    import shutil  # local import: only this extractor needs it
    targetname = util.get_single_outfile(outdir, archive)
    try:
        with bz2.BZ2File(archive) as bz2file:
            with open(targetname, 'wb') as targetfile:
                # replace the manual read/write loop with the stdlib
                # chunked copy (same READ_SIZE_BYTES chunking)
                shutil.copyfileobj(bz2file, targetfile, READ_SIZE_BYTES)
    except Exception as err:
        msg = "error extracting %s to %s: %s" % (archive, targetname, err)
        raise util.PatoolError(msg)
    return None
Extract a BZIP2 archive with the bz2 Python module.
def info(self, name, description, labelnames=None, labelvalues=None, **labels):
    """Expose static information as a Gauge metric set to 1.

    Labels may be given either as keyword arguments or as parallel
    `labelnames` / `labelvalues` collections — not both.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labelnames: the names of the labels
    :param labelvalues: the values of the labels
    :param labels: the names and values of the labels
    :return: the newly created `Gauge` metric
    """
    if labels and labelnames:
        raise ValueError(
            'Cannot have labels defined as `dict` '
            'and collections of names and values'
        )
    if labelnames is None and labels:
        labelnames = labels.keys()
    elif labelnames and labelvalues:
        # zip the parallel collections into the labels dict
        # (indexing preserves the original IndexError on mismatch)
        labels = {ln: labelvalues[i] for i, ln in enumerate(labelnames)}
    gauge = Gauge(
        name, description, labelnames or tuple(), registry=self.registry
    )
    if labels:
        gauge = gauge.labels(**labels)
    gauge.set(1)
    return gauge
Report any information as a Prometheus metric. This will create a `Gauge` with the initial value of 1. The easiest way to use it is: metrics = PrometheusMetrics(app) metrics.info( 'app_info', 'Application info', version='1.0', major=1, minor=0 ) If the order of the labels matters: metrics = PrometheusMetrics(app) metrics.info( 'app_info', 'Application info', ('version', 'major', 'minor'), ('1.0', 1, 0) ) :param name: the name of the metric :param description: the description of the metric :param labelnames: the names of the labels :param labelvalues: the values of the labels :param labels: the names and values of the labels :return: the newly created `Gauge` metric
def create_system(self, **system_options):
    """Create an OpenMM system for every supported topology file type.

    Dispatches on the type of `self.master` (ForceField, Amber/Gromacs/
    Desmond topology files, or a CHARMM PSF) and applies the handler's
    periodic box vectors when present.

    Raises:
        ValueError: if no master is set, or a PSF lacks Charmm params.
        NotImplementedError: for unsupported master types.
    """
    if self.master is None:
        raise ValueError('Handler {} is not able to create systems.'.format(self))
    if isinstance(self.master, ForceField):
        # a ForceField builds from our topology; file-based masters
        # already carry their own
        system = self.master.createSystem(self.topology, **system_options)
    elif isinstance(self.master, (AmberPrmtopFile, GromacsTopFile, DesmondDMSFile)):
        system = self.master.createSystem(**system_options)
    elif isinstance(self.master, CharmmPsfFile):
        if not hasattr(self.master, 'parmset'):
            raise ValueError('PSF topology files require Charmm parameters.')
        system = self.master.createSystem(self.master.parmset, **system_options)
    else:
        raise NotImplementedError('Handler {} is not able to create systems.'.format(self))
    if self.has_box:
        system.setDefaultPeriodicBoxVectors(*self.box)
    return system
Create an OpenMM system for every supported topology file with given system options
def delete_events(
    self,
    project_name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Delete all error events of the given project.

    Args:
        project_name (str): [Required] resource name of the Google
            Cloud Platform project, ``projects/<project-id>``.
        retry (Optional[google.api_core.retry.Retry]): how to retry the
            RPC; ``None`` disables retries.
        timeout (Optional[float]): per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): additional
            metadata provided to the method.

    Returns:
        A DeleteEventsResponse instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError
        google.api_core.exceptions.RetryError
        ValueError: if the parameters are invalid.
    """
    # wrap the transport method once and cache the wrapped callable
    if "delete_events" not in self._inner_api_calls:
        self._inner_api_calls[
            "delete_events"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_events,
            default_retry=self._method_configs["DeleteEvents"].retry,
            default_timeout=self._method_configs["DeleteEvents"].timeout,
            client_info=self._client_info,
        )
    request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # attach a routing header so the backend can route by project
    try:
        routing_header = [("project_name", project_name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    return self._inner_api_calls["delete_events"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Deletes all error events of a given project. Example: >>> from google.cloud import errorreporting_v1beta1 >>> >>> client = errorreporting_v1beta1.ErrorStatsServiceClient() >>> >>> project_name = client.project_path('[PROJECT]') >>> >>> response = client.delete_events(project_name) Args: project_name (str): [Required] The resource name of the Google Cloud Platform project. Written as ``projects/`` plus the `Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`__. Example: ``projects/my-project-123``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def restrict_to_dtype(dtype, message_template):
    """Factory for decorators restricting Term methods to one dtype.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype on which the decorated method may be called.
    message_template : str
        Template for the TypeError text; formatted with keyword
        arguments `method_name`, `expected_dtype`, `received_dtype`.
    """
    def processor(term_method, _, term_instance):
        # runs as a preprocess hook on `self` before the method body
        term_dtype = term_instance.dtype
        if term_dtype != dtype:
            raise TypeError(
                message_template.format(
                    method_name=term_method.__name__,
                    expected_dtype=dtype.name,
                    received_dtype=term_dtype,
                )
            )
        return term_instance
    return preprocess(self=processor)
A factory for decorators that restrict Term methods to only be callable on Terms with a specific dtype. This is conceptually similar to zipline.utils.input_validation.expect_dtypes, but provides more flexibility for providing error messages that are specifically targeting Term methods. Parameters ---------- dtype : numpy.dtype The dtype on which the decorated method may be called. message_template : str A template for the error message to be raised. `message_template.format` will be called with keyword arguments `method_name`, `expected_dtype`, and `received_dtype`. Examples -------- @restrict_to_dtype( dtype=float64_dtype, message_template=( "{method_name}() was called on a factor of dtype {received_dtype}." "{method_name}() requires factors of dtype{expected_dtype}." ), ) def some_factor_method(self, ...): self.stuff_that_requires_being_float64(...)
def getRequest(self):
    """Return the primary Analysis Request this attachment is linked to.

    Directly linked ARs take precedence; when more than one is linked,
    the first one wins (and the anomaly is logged). Falls back to the AR
    of the linked analysis, or None when nothing is linked.
    """
    linked = self.getLinkedRequests()

    if len(linked) > 1:
        # Attachments should normally belong to a single AR; log it.
        ar_ids = ", ".join(map(api.get_id, linked))
        logger.info("Attachment assigned to more than one AR: [{}]. "
                    "The first AR will be returned".format(ar_ids))

    if linked:
        return linked[0]

    # No direct link: derive the AR from the attached analysis, if any.
    analysis = self.getAnalysis()
    if IRequestAnalysis.providedBy(analysis):
        return analysis.getRequest()
    return None
Return the primary AR this attachment is linked to.
def get_by_id(self, id_networkv6):
    """Get an IPv6 network by its identifier.

    :param id_networkv6: ID of the NetworkIPv6.
    :return: IPv6 Network.
    """
    # Fixed: the endpoint previously said 'networkv4', which would query
    # the IPv4 resource with an IPv6 network id.
    uri = 'api/networkv6/%s/' % id_networkv6
    return super(ApiNetworkIPv6, self).get(uri)
Get IPv6 network :param id_networkv6: ID for NetworkIPv6 :return: IPv6 Network
def keep_tc_pos(func):
    """Decorator that caches the editor's text-cursor position and vertical
    scrollbar position before calling ``func`` and restores both afterwards.

    This decorator can only be used on modes or panels (the wrapped
    function's first argument must be the editor).

    :param func: wrapped function
    """
    @functools.wraps(func)
    def restore_state_after(editor, *args, **kwds):
        scrollbar = editor.verticalScrollBar()
        saved_slider = scrollbar.sliderPosition()
        saved_pos = editor.textCursor().position()
        result = func(editor, *args, **kwds)
        # Put the cursor and the scrollbar back where they were.
        cursor = editor.textCursor()
        cursor.setPosition(saved_pos)
        editor.setTextCursor(cursor)
        scrollbar.setSliderPosition(saved_slider)
        return result
    return restore_state_after
Cache text cursor position and restore it when the wrapped function exits. This decorator can only be used on modes or panels. :param func: wrapped function
def optimize(exp_rets, covs):
    """Return parameters for mean-variance portfolio optimization.

    Parameters
    ----------
    exp_rets : ndarray
        Vector of expected returns for each investment.
    covs : ndarray
        Covariance matrix for the given investments.

    Returns
    -------
    a : ndarray
        Vector to be scaled by the target return in the linear equation
        for optimal weights.
    b : ndarray
        Constant vector in the linear equation for optimal weights.
    least_risk_ret : float
        Return of the portfolio achieving the lowest possible risk.

    Notes
    -----
    Optimal weights for a target return ``ret`` are ``w = ret * a + b``;
    the minimum-variance weights are ``w = least_risk_ret * a + b``.
    An exception is raised if the covariance matrix is singular or if
    every investment has the same expected return.
    """
    cov_inv = np.linalg.inv(covs)
    ones = np.ones(len(exp_rets))
    ones_ci = ones.dot(cov_inv)
    rets_ci = exp_rets.dot(cov_inv)

    # 2x2 system arising from the target-return and budget constraints.
    m = np.array([
        [rets_ci.dot(exp_rets), ones_ci.dot(exp_rets)],
        [rets_ci.dot(ones), ones_ci.dot(ones)],
    ])
    m_inv = np.linalg.inv(m)

    a = m_inv[0, 0] * rets_ci + m_inv[1, 0] * ones_ci
    b = m_inv[0, 1] * rets_ci + m_inv[1, 1] * ones_ci
    least_risk_ret = m[0, 1] / m[1, 1]
    return a, b, least_risk_ret
Return parameters for portfolio optimization. Parameters ---------- exp_rets : ndarray Vector of expected returns for each investment.. covs : ndarray Covariance matrix for the given investments. Returns --------- a : ndarray The first vector (to be combined with target return as scalar) in the linear equation for optimal weights. b : ndarray The second (constant) vector in the linear equation for optimal weights. least_risk_ret : int The return achieved on the portfolio that combines the given equities so as to achieve the lowest possible risk. Notes --------- * The length of `exp_rets` must match the number of rows and columns in the `covs` matrix. * The weights for an optimal portfolio with expected return `ret` is given by the formula `w = ret * a + b` where `a` and `b` are the vectors returned here. The weights `w` for the portfolio with lowest risk are given by `w = least_risk_ret * a + b`. * An exception will be raised if the covariance matrix is singular or if each prospective investment has the same expected return.
def _serialize(self, uri, node):
    """Serialize a node result as a dict with uri, content and meta."""
    is_published = bool(node['is_published'])
    meta = self._decode_meta(node['meta'], is_published=is_published)
    node_uri = uri.clone(ext=node['plugin'], version=node['version'])
    return {'uri': node_uri, 'content': node['content'], 'meta': meta}
Serialize node result as dict
def cache_key(self, request, method=None):
    """Build the cache key from the request's absolute URI and its method.

    When ``method`` is not given, the request's own method is used.
    """
    effective_method = method if method is not None else request.method
    return "bettercache_page:%s:%s" % (
        request.build_absolute_uri(), effective_method)
the cache key is the absolute uri and the request method
def image_server_response(self, api_version=None):
    """Response, code and headers for an image server error response.

    api_version selects the format: XML for API versions before 1.1
    (including an unspecified version), plain text otherwise.

    Returns a tuple of
        response - body of HTTP response
        status   - the HTTP status code
        headers  - dict of HTTP headers, including Content-Type
    """
    headers = dict(self.headers)
    # Fixed: `None < '1.1'` raises TypeError on Python 3. An unspecified
    # api_version now explicitly falls back to the XML (1.0) format,
    # matching the old Python 2 ordering of None before strings.
    if api_version is None or api_version < '1.1':
        headers['Content-Type'] = 'text/xml'
        response = self.as_xml()
    else:
        headers['Content-Type'] = 'text/plain'
        response = self.as_txt()
    return (response, self.code, headers)
Response, code and headers for image server error response. api_version selects the format (XML of 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response.
def get_plugins(modules, classobj):
    """Find all plugin class objects of type ``classobj`` in ``modules``.

    @param modules: the modules to search
    @type modules: iterator of modules
    @return: found classes
    @rtype: iterator of class objects
    """
    for mod in modules:
        for found in get_module_plugins(mod, classobj):
            yield found
Find all class objects in all modules. @param modules: the modules to search @type modules: iterator of modules @return: found classes @rtype: iterator of class objects
def _get_partial(name, partials_dict, partials_path, partials_ext): try: return partials_dict[name] except KeyError: try: path_ext = ('.' + partials_ext if partials_ext else '') path = partials_path + '/' + name + path_ext with io.open(path, 'r', encoding='utf-8') as partial: return partial.read() except IOError: return ''
Load a partial
def start(vm_name, call=None):
    '''
    Call GCE 'start' on the instance.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start myinstance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )

    def fire(tag_suffix):
        # Emit a salt/cloud lifecycle event for this instance.
        __utils__['cloud.fire_event'](
            'event',
            'start instance',
            'salt/cloud/{0}/{1}'.format(vm_name, tag_suffix),
            args={'name': vm_name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

    conn = get_conn()
    fire('starting')
    result = conn.ex_start_node(conn.ex_get_node(vm_name))
    fire('started')
    return result
Call GCE 'start on the instance. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt-cloud -a start myinstance
def to_arrow_schema(schema):
    """Convert a schema from Spark to Arrow."""
    import pyarrow as pa

    fields = []
    for field in schema:
        arrow_type = to_arrow_type(field.dataType)
        fields.append(
            pa.field(field.name, arrow_type, nullable=field.nullable))
    return pa.schema(fields)
Convert a schema from Spark to Arrow
def detect_django_settings():
    """Automatically try to discover Django settings files, returning
    them as relative dotted module paths (e.g. ``myproject.settings``).

    Files inside ``site-packages`` are skipped.
    """
    matches = []
    cwd = os.getcwd()
    for root, dirnames, filenames in os.walk(cwd):
        for filename in fnmatch.filter(filenames, '*settings.py'):
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                continue
            # Turn the path (relative to cwd) into a dotted module path.
            # The original computed `full` twice; the duplicate is gone.
            package_path = full.replace(cwd, '')
            package_module = package_path.replace(os.sep, '.').split(
                '.', 1)[1].replace('.py', '')
            matches.append(package_module)
    return matches
Automatically try to discover Django settings files, return them as relative module paths.
def passive_aggressive_train(self):
    """Train a passive-aggressive classifier on the term-document matrix
    and install an ECDF-based pseudo-probability function in self._proba.

    Returns:
        self, for chaining.
    """
    self._clf = PassiveAggressiveClassifier(n_iter=50, C=0.2, n_jobs=-1, random_state=0)
    self._clf.fit(self._term_doc_matrix._X, self._term_doc_matrix._y)
    # Signed distances of the training samples from the separating
    # hyperplane, used to calibrate an empirical CDF per side.
    y_dist = self._clf.decision_function(self._term_doc_matrix._X)
    pos_ecdf = ECDF(y_dist[y_dist >= 0])
    neg_ecdf = ECDF(y_dist[y_dist <= 0])
    # NOTE(review): neg_ecdf is computed but never used below — the
    # negative branch also evaluates pos_ecdf. Possibly intentional
    # (pos_ecdf evaluates to ~0 for negative inputs), but it looks like
    # a bug; confirm against the intended calibration.

    def proba_function(distance_from_hyperplane):
        # Map a signed hyperplane distance to [0, 1]: >0 maps into
        # (0.5, 1], <0 maps into [0, 0.5), exactly 0 maps to 0.5.
        if distance_from_hyperplane > 0:
            return pos_ecdf(distance_from_hyperplane) / 2. + 0.5
        elif distance_from_hyperplane < 0:
            return pos_ecdf(distance_from_hyperplane) / 2.
        return 0.5
    self._proba = proba_function
    return self
Trains passive aggressive classifier
def compile_less(input_file, output_file):
    """Compile a LESS source file; the output is minified in release mode.

    Only a single input file is supported.
    """
    from .modules import less

    if not isinstance(input_file, str):
        raise RuntimeError('LESS compiler takes only a single input file.')

    spec = dict(
        dependencies_fn=less.less_dependencies,
        compiler_fn=less.less_compile,
        input=input_file,
        output=output_file,
        kwargs={},
    )
    return spec
Compile a LESS source file. Minifies the output in release mode.
def CMN(self, params):
    """
    CMN Ra, Rb

    Add the two registers and set the NZCV flags; the sum itself is
    discarded. Ra and Rb must be low registers.
    """
    Ra, Rb = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
    self.check_arguments(low_registers=(Ra, Rb))

    def CMN_func():
        # Flags come from the addition; no destination register is written.
        lhs = self.register[Ra]
        rhs = self.register[Rb]
        self.set_NZCV_flags(lhs, rhs, lhs + rhs, 'add')

    return CMN_func
CMN Ra, Rb Add the two registers and set the NZCV flags The result is discarded Ra and Rb must be low registers
def create_book(self, name):
    """Create a new book with the given (stripped) name.

    Reports an error for blank names, names containing ',', or names
    that already exist; reports (but does not raise) on insert failure.
    """
    name = name.strip()
    if not name:
        self.error("Cannot have a blank book name")
    if "," in name:
        self.error("Cannot have a ',' in a book name")
    existing = self.list_books()
    nexisting = len(existing)
    if name in existing:
        self.error("Already have a book named '%s'" % name)
    try:
        self.cur.execute("INSERT INTO book (number, name) VALUES(?, ?);",
                         (nexisting, name))
        self.con.commit()
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        self.fyi("Error adding a book named '%s'" % name)
Create a new book
def Decorate(cls, class_name, member, parent_member):
    """Decorate a member with @typecheck, inheriting checks from the
    parent member. Properties have each defined accessor decorated."""
    if not isinstance(member, property):
        return cls.DecorateMethod(class_name, member, parent_member)

    def decorate_if_set(accessor):
        # Only setter/deleter are optional; getter is always decorated.
        return (cls.DecorateMethod(class_name, accessor, parent_member)
                if accessor else None)

    fget = cls.DecorateMethod(class_name, member.fget, parent_member)
    return property(fget,
                    decorate_if_set(member.fset),
                    decorate_if_set(member.fdel),
                    member.__doc__)
Decorates a member with @typecheck. Inherit checks from parent member.
def get_score_system_id(self):
    """Gets the grade system ``Id`` for the score.

    return: (osid.id.Id) - the grade system ``Id``
    raise: IllegalState - ``is_scored()`` is ``false``
    *compliance: mandatory -- This method must be implemented.*
    """
    score_system_id = self._my_map['scoreSystemId']
    if not score_system_id:
        raise errors.IllegalState('this AssessmentOffered has no score_system')
    return Id(score_system_id)
Gets the grade system ``Id`` for the score. return: (osid.id.Id) - the grade system ``Id`` raise: IllegalState - ``is_scored()`` is ``false`` *compliance: mandatory -- This method must be implemented.*
def read_only_s3_bucket_policy_statements(buckets, folder="*"):
    """Read-only IAM policy statements for the given s3 buckets."""
    # Bucket ARNs first, then the object ARNs (scoped to `folder`).
    bucket_resources = [s3_arn(b) for b in buckets]
    bucket_resources += [s3_objects_arn(b, folder) for b in buckets]

    list_all_statement = Statement(
        Effect=Allow,
        Resource=[s3_arn("*")],
        Action=[s3.ListAllMyBuckets],
    )
    read_statement = Statement(
        Effect=Allow,
        Resource=bucket_resources,
        Action=[Action('s3', 'Get*'), Action('s3', 'List*')],
    )
    return [list_all_statement, read_statement]
Read-only policy for an s3 bucket.
def run_callbacks(obj, log=None):
    """Run callbacks by invoking each one with its stored arguments."""
    return walk_callbacks(obj, lambda cb, args: cb(*args), log)
Run callbacks.
def bulk(self, actions, stats_only=False, **kwargs):
    """Execute a bulk request via elasticsearch.helpers.bulk.

    :param actions: iterator containing the actions
    :param stats_only: if `True` only report the number of
        successful/failed operations instead of a list of error responses
    Any additional keyword arguments are passed on to
    :func:`~elasticsearch.helpers.streaming_bulk`, which performs the
    operation.
    """
    success, failed = es_helpers.bulk(
        self.client, actions, stats_only, **kwargs)
    # Lazy %-style args: the message is only rendered if INFO is enabled.
    logger.info('Bulk is done success %s failed %s actions: \n %s',
                success, failed, actions)
Executes bulk api by elasticsearch.helpers.bulk. :param actions: iterator containing the actions :param stats_only:if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters.
def iter_following(username, number=-1, etag=None):
    """List the people ``username`` follows.

    :param str username: (required), login of the user
    :param int number: (optional), how many followed users to return;
        -1 (default) returns all of them
    :param str etag: (optional), ETag from a previous request to the
        same endpoint
    :returns: generator of :class:`User <github3.users.User>`; an empty
        list when no username is given
    """
    if not username:
        return []
    return gh.iter_following(username, number, etag)
List the people ``username`` follows. :param str username: (required), login of the user :param int number: (optional), number of users being followed by username to return. Default: -1, return all of them :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`User <github3.users.User>`
def get_records(self, ids):
    """Return records matching the given identifiers.

    :param ids: A list of record identifiers.
    :returns: A list of records.
    """
    id_values = [str(identifier) for identifier in ids]
    return self.query(Ids(values=id_values))
Return records by their identifiers. :param ids: A list of record identifier. :returns: A list of records.
def convert_to_cgs(self, equivalence=None, **kwargs):
    """Convert the array in-place to the equivalent cgs units.

    Parameters
    ----------
    equivalence : string, optional
        Equivalence to use for converting to an equivalent quantity with
        different dimensions; see ``list_equivalencies``. Default: None
    kwargs : optional
        Extra keyword arguments forwarded to the equivalence.
    """
    cgs_units = self.units.get_cgs_equivalent()
    self.convert_to_units(cgs_units, equivalence=equivalence, **kwargs)
Convert the array and in-place to the equivalent cgs units. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. Parameters ---------- equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this object, try the ``list_equivalencies`` method. Default: None kwargs: optional Any additional keyword arguments are supplied to the equivalence Raises ------ If the provided unit does not have the same dimensions as the array this will raise a UnitConversionError Examples -------- >>> from unyt import Newton >>> data = [1., 2., 3.]*Newton >>> data.convert_to_cgs() >>> data unyt_array([100000., 200000., 300000.], 'dyn')
def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None):
    '''
    Make the server a slave of another instance, or promote it as master

    CLI Example:

    .. code-block:: bash

        # Become slave of redis-n01.example.com:6379
        salt '*' redis.slaveof redis-n01.example.com 6379
        salt '*' redis.slaveof redis-n01.example.com

        # Become master
        salt '*' redis.slaveof
    '''
    # Default to the standard redis port when only a master host is given.
    if master_host and not master_port:
        master_port = 6379
    redis_server = _connect(host, port, db, password)
    return redis_server.slaveof(master_host, master_port)
Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof
def subdomains_init(blockstack_opts, working_dir, atlas_state):
    """Set up subdomain state.

    Returns a SubdomainIndex wired into Atlas zonefile storage, or None
    when subdomains are disabled.
    """
    if not is_subdomains_enabled(blockstack_opts):
        return None

    db_path = blockstack_opts['subdomaindb_path']
    subdomain_state = SubdomainIndex(db_path, blockstack_opts=blockstack_opts)
    # Feed every stored zonefile through the subdomain indexer.
    atlas_node_add_callback(atlas_state, 'store_zonefile',
                            subdomain_state.enqueue_zonefile)
    return subdomain_state
Set up subdomain state Returns a SubdomainIndex object that has been successfully connected to Atlas
def createHorizonPolygons(self):
    """Create the two polygons showing the sky (top) and ground (bottom)."""
    def add_half(y_extent, color):
        # Rectangle spanning x in [-1, 1] from the horizon (y=0) to y_extent.
        verts = [[-1, 0], [-1, y_extent], [1, y_extent], [1, 0], [-1, 0]]
        poly = Polygon(verts, facecolor=color, edgecolor='none')
        self.axes.add_patch(poly)
        return poly

    self.topPolygon = add_half(1, 'dodgerblue')
    self.botPolygon = add_half(-1, 'brown')
Creates the two polygons to show the sky and ground.
def prox_yline(y, step):
    """Projection onto the line in y: clamp the value to at most -0.75.

    Accepts a scalar or a 1-element array; always returns a 1-element
    ndarray. ``step`` is accepted for interface compatibility but unused.
    """
    value = y if np.isscalar(y) else y[0]
    return np.array([min(value, -0.75)])
Projection onto line in y
def get_user_permissions(self, user_id):
    """Return the user's permissions, sorted by key name."""
    permissions = self.user_service.getPermissions(id=user_id)
    return sorted(permissions, key=lambda perm: perm['keyName'])
Returns a sorted list of a users permissions
def execute(self):
    """Execute this generator regarding its current configuration."""
    if self.direct and self.file_type == 'pdf':
        raise IOError(u"Direct output mode is not available for PDF "
                      "export")

    if self.direct:
        print(self.render().encode(self.encoding))
    else:
        self.write_and_log()

    if self.watch:
        from landslide.watcher import watch

        self.log(u"Watching %s\n" % self.watch_dir)
        watch(self.watch_dir, self.write_and_log)
Execute this generator regarding its current configuration.
def is_cidr_in_cidr(small_cidr, big_cidr):
    """Return True if the small CIDR is contained in the big CIDR.

    NOTE(review): "0.0.0.0/0" is special-cased: it is only considered
    contained in "0.0.0.0/0" itself, and no other CIDR is considered
    contained in it. This preserves the original (surprising) semantics;
    confirm it is intentional.
    """
    if small_cidr == "0.0.0.0/0":
        return big_cidr == "0.0.0.0/0"
    if big_cidr == "0.0.0.0/0":
        return False
    # Fixed: `unicode(...)` is a NameError on Python 3. Formatting into a
    # u'' literal yields a text string on both Python 2 and 3, as the
    # ipaddress module/backport requires.
    small_net = ipaddress.IPv4Network(u'%s' % small_cidr)
    big_net = ipaddress.IPv4Network(u'%s' % big_cidr)
    return small_net.subnet_of(big_net)
Return True if the small CIDR is contained in the big CIDR.
def fit_lsq(self, df):
    """Parameterize generic SAR curve from an empirical data set.

    Parameters
    ----------
    df : DataFrame
        Result data frame from empirical SAR analysis. Must contain a
        '1,1' division row; an error is raised otherwise.

    Returns
    -------
    tuple
        (S0, N0): ``n_spp`` and ``n_individs`` at the 1,1 division.

    Notes
    -----
    Simply returns S0 and N0 from the empirical SAR output, the two
    fixed parameters of METE SAR and EAR. ``fit_lsq`` is retained for
    consistency with other curves.
    """
    tdf = df.set_index('div')
    # Fixed: DataFrame.ix was deprecated and removed from pandas;
    # label-based lookup now uses .loc.
    row = tdf.loc['1,1']
    return row['n_spp'], row['n_individs']
Parameterize generic SAR curve from empirical data set Parameters ---------- df : DataFrame Result data frame from empirical SAR analysis Notes ----- Simply returns S0 and N0 from empirical SAR output, which are two fixed parameters of METE SAR and EAR. This simply returns n_spp and n_individs from the 1,1 division in the dataframe. An error will be thrown if this division is not present The ``fit_lsq`` is retained for consistency with other curves.
def get_peer_id(peer, add_mark=True):
    """Finds the ID of the given peer and converts it to the "bot api"
    format so the peer can be identified back.

    User IDs are left unmodified, chat IDs are negated, and channel IDs
    are prefixed with -100. The original ID and the peer type class can
    be recovered with a call to resolve_id(marked_id).
    """
    # Already an integer: assumed to be a marked ID; strip the mark when
    # the caller asked for the raw ID instead.
    if isinstance(peer, int):
        return peer if add_mark else resolve_id(peer)[0]

    # InputPeerSelf carries no ID at all; the caller must resolve it.
    if isinstance(peer, types.InputPeerSelf):
        _raise_cast_fail(peer, 'int (you might want to use client.get_peer_id)')
    try:
        peer = get_peer(peer)
    except TypeError:
        _raise_cast_fail(peer, 'int')

    if isinstance(peer, types.PeerUser):
        return peer.user_id
    elif isinstance(peer, types.PeerChat):
        # Chat IDs may arrive already marked (negative / out of the
        # 31-bit range); normalize to the raw positive ID first.
        if not (0 < peer.chat_id <= 0x7fffffff):
            peer.chat_id = resolve_id(peer.chat_id)[0]
        return -peer.chat_id if add_mark else peer.chat_id
    else:  # PeerChannel
        if not (0 < peer.channel_id <= 0x7fffffff):
            peer.channel_id = resolve_id(peer.channel_id)[0]
        if not add_mark:
            return peer.channel_id
        # Arithmetically prepend "-100" to the ID:
        # -(id + 10 ** (number_of_digits(id) + 2)) == int('-100' + str(id)).
        return -(peer.channel_id + pow(
            10, math.floor(math.log10(peer.channel_id) + 3)))
Finds the ID of the given peer, and converts it to the "bot api" format so it the peer can be identified back. User ID is left unmodified, chat ID is negated, and channel ID is prefixed with -100. The original ID and the peer type class can be returned with a call to :meth:`resolve_id(marked_id)`.
def _eval_model(self):
    """Evaluate the model on the stored grid with current parameter values.

    :return: named tuple with results
    """
    arguments = self._x_grid.copy()
    # Bind each model parameter to its current value.
    for param in self.model.params:
        arguments[param] = param.value
    return self.model(**key2str(arguments))
Convenience method for evaluating the model with the current parameters :return: named tuple with results
def clean_time(self, time):
    """Normalize a time field to a datetime object.

    Unix timestamps (int) and date strings are converted; any other
    value is returned unchanged.
    """
    if isinstance(time, int):
        return datetime.utcfromtimestamp(time)
    if isinstance(time, str):
        return parser.parse(time)
    return time
Transform time field to datetime object if there is any.
def _ToString(x): if x is None: return 'null' if isinstance(x, six.string_types): return x return pprint.pformat(x)
The default default formatter!.
def gather_commands(self, ingredient):
    """Collect all commands from this ingredient and its sub-ingredients.

    Yields
    ------
    cmd_name : str
        The full (dotted) name of the command.
    cmd : function
        The corresponding captured function.
    """
    for cmd_name, cmd in ingredient.commands.items():
        full_name = join_paths(ingredient.path, cmd_name)
        yield full_name, cmd
Collect all commands from this ingredient and its sub-ingredients. Yields ------ cmd_name: str The full (dotted) name of the command. cmd: function The corresponding captured function.
def run_snr(self):
    """Run the snr calculation.

    Feeds the parameters gathered by ``self.set_parameters`` (plus the
    configured keyword groups) into the snr calculator and stores the
    result in ``self.final_dict``.
    """
    if self.ecc:
        # Eccentric binaries take initial-condition parameters.
        required_kwargs = {
            'dist_type': self.dist_type,
            'initial_cond_type': self.initial_cond_type,
            'ecc': True,
        }
        input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point,
                      self.eccentricity, self.observation_time]
    else:
        required_kwargs = {'dist_type': self.dist_type}
        input_args = [self.m1, self.m2, self.spin_1, self.spin_2,
                      self.z_or_dist, self.start_time, self.end_time]

    # Merge the keyword groups; later groups override earlier ones.
    input_kwargs = dict(required_kwargs)
    for extra in (self.general, self.sensitivity_input,
                  self.snr_input, self.parallel_input):
        input_kwargs.update(extra)

    self.final_dict = snr(*input_args, **input_kwargs)
    return
Run the snr calculation. Takes results from ``self.set_parameters`` and other inputs and inputs these into the snr calculator.
def rebalance_brokers(self):
    """Rebalance partition-count across brokers within each
    replication-group."""
    for replication_group in six.itervalues(self.cluster_topology.rgs):
        replication_group.rebalance_brokers()
Rebalance partition-count across brokers within each replication-group.
def log_setup(debug_bool):
    """Set up logging to stdout only.

    Instead of also writing to a log file, redirect stdout to a log file
    when the script is executed from cron.

    Parameters
    ----------
    debug_bool : bool
        When True, log at DEBUG level; otherwise INFO.
    """
    level = logging.DEBUG if debug_bool else logging.INFO
    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "verbose": {
                    "format": "%(asctime)s %(levelname)-8s %(name)s %(module)s "
                    "%(process)d %(thread)d %(message)s",
                    "datefmt": "%Y-%m-%d %H:%M:%S",
                }
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    "formatter": "verbose",
                    "level": level,
                    "stream": "ext://sys.stdout",
                }
            },
            "loggers": {
                # Root logger. Fixed: the previous config carried a bogus
                # "class" key here (loggers have no class); dictConfig
                # silently ignored it, but it was misleading.
                "": {
                    "handlers": ["console"],
                    "level": level,
                }
            },
        }
    )
Set up logging. We output only to stdout. Instead of also writing to a log file, redirect stdout to a log file when the script is executed from cron.
def login(self, username, password, limit=10, sync=True, device_id=None):
    """Login to the homeserver with m.login.password.

    Args:
        username (str): Account username
        password (str): Account password
        limit (int): Deprecated. How many timeline messages to return in
            the initial sync; only used when ``sync`` is True.
        sync (bool): Optional. Whether to run an initial /sync request
            after logging in.
        device_id (str): Optional. ID of the client device; the server
            auto-generates one if not specified.

    Returns:
        str: Access token

    Raises:
        MatrixRequestError
    """
    response = self.api.login(
        "m.login.password", user=username, password=password,
        device_id=device_id
    )
    # Cache the identity/session details returned by the server.
    self.user_id = response["user_id"]
    self.token = response["access_token"]
    self.hs = response["home_server"]
    # The api object needs the token before any further requests,
    # including the E2E key uploads below.
    self.api.token = self.token
    self.device_id = response["device_id"]

    if self._encryption:
        # Set up end-to-end encryption for this device and publish its keys.
        self.olm_device = OlmDevice(
            self.api, self.user_id, self.device_id, **self.encryption_conf)
        self.olm_device.upload_identity_keys()
        self.olm_device.upload_one_time_keys()

    if sync:
        # The filter caps the timeline size of the initial sync.
        self.sync_filter = '{ "room": { "timeline" : { "limit" : %i } } }' % limit
        self._sync()

    return self.token
Login to the homeserver. Args: username (str): Account username password (str): Account password limit (int): Deprecated. How many messages to return when syncing. This will be replaced by a filter API in a later release. sync (bool): Optional. Whether to initiate a /sync request after logging in. device_id (str): Optional. ID of the client device. The server will auto-generate a device_id if this is not specified. Returns: str: Access token Raises: MatrixRequestError