code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _runargs(argstring):
    """Debug entry point: parse *argstring* like a shell command line and run it."""
    import shlex

    args = cli.make_arg_parser().parse_args(shlex.split(argstring))
    run(args)
Entrypoint for debugging
def get_token(self, url):
    """Retrieve a temporary access token for *url*.

    Side effect: ``self.url`` is repointed at the host's
    ``/authorization/api`` path before the token request is made.
    """
    split = urlparse.urlsplit(url)
    self.url = urlparse.urlunsplit(split._replace(path='/authorization/api'))
    response = self.request(method='GET', url='/v1/token?url=' + url)
    return response.result.text
Retrieves a temporary access token
def fetch(cls, id, api_key=None, endpoint=None, add_headers=None, **kwargs):
    """Fetch a single entity by its exact *id* from the API endpoint.

    Extra keyword arguments become query parameters. Returns a populated
    instance of the class.
    """
    if endpoint is None:
        endpoint = cls.get_endpoint()
    inst = cls(api_key=api_key)
    # The parse key is derived from the endpoint *before* the id is appended.
    parse_key = cls.sanitize_ep(endpoint).split("/")[-1]
    response = inst.request('GET', endpoint='/'.join((endpoint, id)),
                            add_headers=add_headers, query_params=kwargs)
    inst._set(cls._parse(response, key=parse_key))
    return inst
Fetch a single entity from the API endpoint. Used when you know the exact ID that must be queried.
def has_stack(self, s):
    """Return True iff store *s* is a stack, i.e. every transition keeps both
    its left- and right-hand side of store *s* at position 0."""
    return all(t.lhs[s].position == 0 and t.rhs[s].position == 0
               for t in self.transitions)
Tests whether store `s` is a stack, that is, it never moves from position 0.
def housecode_to_index(housecode):
    """Convert an X10 housecode ('A1' .. 'P16', case-insensitive) to a
    zero-based index.

    Raises ValueError for anything outside house A-P / unit 1-16.
    """
    m = re.search(r'^([A-P])(\d{1,2})$', housecode.upper())
    if m is None:
        raise ValueError("Invalid X10 housecode: %s" % housecode)
    unit = int(m.group(2))
    if not 1 <= unit <= 16:
        raise ValueError("Invalid X10 housecode: %s" % housecode)
    return (ord(m.group(1)) - ord('A')) * 16 + unit - 1
Convert a X10 housecode to a zero-based index
def remove(self, w):
    """Delete waypoint *w*, stamp the change time and rebuild the index."""
    self.wpoints.remove(w)
    self.last_change = time.time()
    self.reindex()
remove a waypoint
def _validate_options(options, service_name, add_error):
    """Lazily validate *options*: if present it must be a dict, otherwise a
    validation error is registered through *add_error*."""
    if options is not None and not isdict(options):
        add_error('service {} has malformed options'.format(service_name))
Lazily validate the options, ensuring that they are a dict. Use the given add_error callable to register validation error.
def load(self, filename=None):
    """Load *filename* (or the class default) exactly once and remember it.

    Raises RuntimeError for an empty file; asserts on a second load or on a
    missing filename.
    """
    assert not self.__flag_loaded, "File can be loaded only once"
    if filename is None:
        filename = self.default_filename
    assert filename is not None, \
        "{0!s} class has no default filename".format(self.__class__.__name__)
    if os.path.getsize(filename) == 0:
        raise RuntimeError("Empty file: '{0!s}'".format(filename))
    self._test_magic(filename)
    self._do_load(filename)
    self.filename = filename
    self.__flag_loaded = True
Loads file and registers filename as attribute.
def _build_tree(self): if not self.nn_ready: self.kdtree = scipy.spatial.cKDTree(self.data) self.nn_ready = True
Build the KDTree for the observed data
def topoff(cls, amount):
    """Ensure every existing user has at least *amount* invites."""
    for user in get_user_model().objects.all():
        cls.topoff_user(user, amount)
Ensure all users have a minimum number of invites.
def with_port(self, port):
    """Return a new URL with the port replaced (None resets to the default).

    Raises TypeError for a non-int port and ValueError on relative URLs.
    """
    if port is not None and not isinstance(port, int):
        raise TypeError("port should be int or None, got {}".format(type(port)))
    if not self.is_absolute():
        raise ValueError("port replacement is not allowed "
                         "for relative URLs")
    val = self._val
    netloc = self._make_netloc(val.username, val.password, val.hostname,
                               port, encode=False)
    return URL(val._replace(netloc=netloc), encoded=True)
Return a new URL with port replaced. Clear port to default if None is passed.
def _compile_dimension_size(self, base_index, array, property, sized_elements): sort_index = base_index + 2 sized_elements.sort(key=lambda x: x[sort_index]) for element_data in sized_elements: start, end = element_data[base_index], element_data[sort_index] end += start element, size = element_data[4:6] set_size = sum(array[start:end]) + (end-start-1)*self.margin extra_space_needed = getattr(size, property) - set_size if extra_space_needed < 0: continue extra_space_each = extra_space_needed / (end-start) for index in range(start, end): array[index] += extra_space_each
Build one set of col widths or row heights.
def create(cls, parent, child, relation_type, index=None):
    """Create a PID relation between *parent* and *child*.

    Raises Exception when the relation already exists (integrity error).
    """
    try:
        with db.session.begin_nested():
            obj = cls(parent_id=parent.id,
                      child_id=child.id,
                      relation_type=relation_type,
                      index=index)
            db.session.add(obj)
    except IntegrityError:
        raise Exception("PID Relation already exists.")
    return obj
Create a PID relation for given parent and child.
def possible_moves(self, position):
    """Yield every move from *position*: all cardinal-direction moves first,
    then castling moves."""
    # Evaluate every add() call eagerly (as the original chain(*[...]) did)
    # before yielding anything.
    batches = [self.add(direction, position)
               for direction in self.cardinal_directions]
    yield from itertools.chain.from_iterable(batches)
    yield from self.add_castle(position)
Generates the possible moves from the given position :type position: Board :rtype: generator of moves
def put_file(self, key, file):
    """Store data from *file* under *key* and return the key used.

    *file* may be a filename string (the file may be moved rather than
    copied) or an object with a ``read()`` method.
    """
    handler = self._put_filename if isinstance(file, str) else self._put_file
    return handler(key, file)
Store into key from file on disk Stores data from a source into key. *file* can either be a string, which will be interpreted as a filename, or an object with a *read()* method. If the passed object has a *fileno()* method, it may be used to speed up the operation. The file specified by *file*, if it is a filename, may be removed in the process, to avoid copying if possible. If you need to make a copy, pass the opened file instead. :param key: The key under which the data is to be stored :param file: A filename or an object with a read method. If a filename, may be removed :returns: The key under which data was stored :raises exceptions.ValueError: If the key is not valid. :raises exceptions.IOError: If there was a problem moving the file in.
def _compose_func(func, args_func=lambda req_info: [req_info.index]):
    """Bundle *func* with its argument composer into a FuncInfo.

    *args_func* builds the call arguments from a RequestInfo; by default it
    passes just ``req_info.index``.
    """
    return FuncInfo(func=func, args_func=args_func)
Compose function used to compose arguments to function. Arguments for the functions are composed from the :class:`.RequestInfo` object from the ZODB.
def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):
    """Make a texture of a spiral from the origin, as an (x, y) pair of
    arrays scaled into [0, 1].

    NOTE(review): *offset* is accepted but never used by the computation —
    confirm whether it should rotate the start angle.
    """
    dist = np.sqrt(np.linspace(0., 1., resolution))
    direction = 1. if ccw else -1.
    angle = dist * spirals * np.pi * 2. * direction
    return (
        np.cos(angle) * dist / 2. + 0.5,
        np.sin(angle) * dist / 2. + 0.5,
    )
Makes a texture consisting of a spiral from the origin. Args: spirals (float): the number of rotations to make ccw (bool): make spirals counter-clockwise (default is clockwise) offset (float): if non-zero, spirals start offset by this amount resolution (int): number of midpoints along the spiral Returns: A texture.
def localCheckpoint(self, eager=True):
    """Return a locally checkpointed copy of this DataFrame.

    Truncates the logical plan (useful in iterative algorithms); local
    checkpoints live in the executors' caches and are not reliable.

    :param eager: Whether to checkpoint this DataFrame immediately
    """
    checkpointed = self._jdf.localCheckpoint(eager)
    return DataFrame(checkpointed, self.sql_ctx)
Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental
def identify_modules(*args, **kwargs):
    """Print the disk location of each named module to stdout; report lookup
    failures to stderr.

    Keyword arguments are passed through to ``identify_filepath()``. With a
    single module name terser output templates are used.
    """
    if len(args) == 1:
        path_template = "%(file)s"
        error_template = "Module '%(mod)s' not found (%(error)s)"
    else:
        path_template = "%(mod)s: %(file)s"
        error_template = "%(mod)s: not found (%(error)s)"
    for modulename in args:
        try:
            filepath = identify_filepath(modulename, **kwargs)
        except ModuleNotFound as exc:
            # Modern exception capture; replaces the Python-2-era
            # sys.exc_info()[1] idiom (behavior unchanged).
            sys.stderr.write(error_template % {
                'mod': modulename,
                'error': str(exc),
            })
            sys.stderr.write('\n')
        else:
            print(path_template % {
                'mod': modulename,
                'file': filepath,
            })
Find the disk locations of the given named modules, printing the discovered paths to stdout and errors discovering paths to stderr. Any provided keyword arguments are passed to `identify_filepath()`.
def comments(self, ticket, include_inline_images=False):
    """Retrieve the comments for *ticket* (object or id).

    When *include_inline_images* is True, inline image attachments are also
    returned in each comment's attachments.
    """
    # The API expects a lowercase string literal ('true'/'false').
    inline_flag = repr(include_inline_images).lower()
    return self._query_zendesk(self.endpoint.comments, 'comment',
                               id=ticket,
                               include_inline_images=inline_flag)
Retrieve the comments for a ticket. :param ticket: Ticket object or id :param include_inline_images: Boolean. If `True`, inline image attachments will be returned in each comments' `attachments` field alongside non-inline attachments
def get_status(self):
    """Assemble the status dictionary broadcast across the network."""
    announced = self._service_status_announced
    return {
        "host": self.__hostid,
        "status": announced,
        "statustext": CommonService.human_readable_state.get(announced),
        "service": self._service_name,
        "serviceclass": self._service_class_name,
        "utilization": self._utilization.report(),
        "workflows": workflows.version(),
    }
Returns a dictionary containing all relevant status information to be broadcast across the network.
def move(self, bearing, distance):
    """Move the current position by *bearing* and *distance*."""
    lat = self.pkt['I105']['Lat']['val']
    lon = self.pkt['I105']['Lon']['val']
    new_lat, new_lon = mp_util.gps_newpos(lat, lon, bearing, distance)
    self.setpos(new_lat, new_lon)
move position by bearing and distance
def parse_group_address(addr):
    """Parse a KNX group address ('x/x/x', 'x/x', or a plain number) into an
    integer.

    Raises KNXException for None or an unrecognised format.
    """
    if addr is None:
        raise KNXException("No address given")
    res = None
    if re.match('[0-9]+$', addr):
        res = int(addr)
    two_level = re.match("([0-9]+)/([0-9]+)$", addr)
    if two_level:
        res = int(two_level.group(1)) * 2048 + int(two_level.group(2))
    three_level = re.match("([0-9]+)/([0-9]+)/([0-9]+)$", addr)
    if three_level:
        res = (int(three_level.group(1)) * 2048
               + int(three_level.group(2)) * 256
               + int(three_level.group(3)))
    if res is None:
        raise KNXException("Address {} does not match any address scheme".
                           format(addr))
    return res
Parse KNX group addresses and return the address as an integer. This allows to convert x/x/x and x/x address syntax to a numeric KNX group address
def validate(self, csdl, service='facebook'):
    """Validate *csdl* for analysis via the REST API and return the response.

    NOTE(review): *service* is accepted but not forwarded with the request —
    confirm whether it should be.
    """
    return self.request.post('validate', data=dict(csdl=csdl))
Validate the given CSDL :param csdl: The CSDL to be validated for analysis :type csdl: str :param service: The service for this API call (facebook, etc) :type service: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
def epanechnikovKernel(x, ref_x, h=1.0):
    """Evaluate the Epanechnikov kernel at points *x* around *ref_x* with
    bandwidth *h*; zero outside |u| <= 1."""
    u = (x - ref_x) / h
    out = np.zeros_like(x)
    inside = np.abs(u) <= 1.0
    out[inside] = 0.75 * (1.0 - u[inside] ** 2.0)
    return out
The Epanechnikov kernel. Parameters ---------- x : np.array Values at which to evaluate the kernel ref_x : float The reference point h : float Kernel bandwidth Returns ------- out : np.array Kernel values at each value of x
def trigger_modified(self, filepath):
    """Fire a 'modified' event when *filepath*'s mod time is newer than the
    last one recorded, and remember the new time."""
    mod_time = self._get_modified_time(filepath)
    if mod_time <= self._watched_files.get(filepath, 0):
        return
    self._trigger('modified', filepath)
    self._watched_files[filepath] = mod_time
Triggers modified event if the given filepath mod time is newer.
def background_at_centroid(self):
    """Background value at the (fractional) centroid via bilinear
    interpolation; NaN when masked or the centroid is non-finite, None when
    no background is defined."""
    from scipy.ndimage import map_coordinates

    if self._background is None:
        return None
    if (self._is_completely_masked
            or np.any(~np.isfinite(self.centroid))):
        return np.nan * self._background_unit
    value = map_coordinates(self._background,
                            [[self.ycentroid.value], [self.xcentroid.value]],
                            order=1, mode='nearest')[0]
    return value * self._background_unit
The value of the ``background`` at the position of the source centroid. The background value at fractional position values are determined using bilinear interpolation.
def update(self, key_vals=None, overwrite=True):
    """Write *key_vals* through to the root store; no-op for empty input.

    Locked keys are overwritten unless overwrite=False, in which case the
    written keys are added to the "locked" list instead.
    """
    if not key_vals:
        return
    write_items = self._update(key_vals, overwrite)
    self._root._root_set(self._path, write_items)
    self._root._write(commit=True)
Locked keys will be overwritten unless overwrite=False. Otherwise, written keys will be added to the "locked" list.
def _locate_settings(settings=''): "Return the path to the DJANGO_SETTINGS_MODULE" import imp import sys sys.path.append(os.getcwd()) settings = settings or os.getenv('DJANGO_SETTINGS_MODULE') if settings: parts = settings.split('.') f = imp.find_module(parts[0])[1] args = [f] + parts[1:] path = os.path.join(*args) path = path + '.py' if os.path.exists(path): return path
Return the path to the DJANGO_SETTINGS_MODULE
def notify(
    self,
    method_name: str,
    *args: Any,
    trim_log_values: Optional[bool] = None,
    validate_against_schema: Optional[bool] = None,
    **kwargs: Any
) -> Response:
    """Send a JSON-RPC notification (a request without expecting a response).

    Args:
        method_name: The remote procedure's method name.
        args: Positional arguments passed to the remote procedure.
        kwargs: Keyword arguments passed to the remote procedure.
        trim_log_values: Abbreviate the log entries of requests/responses.
        validate_against_schema: Validate the response against the JSON-RPC
            schema.
    """
    notification = Notification(method_name, *args, **kwargs)
    return self.send(
        notification,
        trim_log_values=trim_log_values,
        validate_against_schema=validate_against_schema,
    )
Send a JSON-RPC request, without expecting a response. Args: method_name: The remote procedure's method name. args: Positional arguments passed to the remote procedure. kwargs: Keyword arguments passed to the remote procedure. trim_log_values: Abbreviate the log entries of requests and responses. validate_against_schema: Validate response against the JSON-RPC schema.
def filter_yn(string, default=None):
    """Map a yes/no answer to True/False; an empty string falls back to
    *default* when one is given. Raises InvalidInputError otherwise."""
    if string.startswith(('Y', 'y')):
        return True
    if string.startswith(('N', 'n')):
        return False
    if not string and default is not None:
        return bool(default)
    raise InvalidInputError
Return True if yes, False if no, or the default.
def scan(data, clamconf):
    """Scan *data* for viruses.

    @return (infection msgs, errors)
    @rtype ([], [])
    """
    try:
        scanner = ClamdScanner(clamconf)
    except socket.error:
        errmsg = _("Could not connect to ClamAV daemon.")
        return ([], [errmsg])
    try:
        scanner.scan(data)
    finally:
        scanner.close()
    return scanner.infected, scanner.errors
Scan data for viruses. @return (infection msgs, errors) @rtype ([], [])
def start_child_span(operation_name, tracer=None, parent=None, tags=None):
    """Start a span as a child of *parent*; with no parent, start a root span.

    :param operation_name: operation name
    :param tracer: Tracer or None (defaults to opentracing.tracer)
    :param parent: parent Span or None
    :param tags: optional tags
    :return: new span
    """
    active_tracer = tracer or opentracing.tracer
    child_of = parent.context if parent else None
    return active_tracer.start_span(operation_name=operation_name,
                                    child_of=child_of,
                                    tags=tags)
Start a new span as a child of parent_span. If parent_span is None, start a new root span. :param operation_name: operation name :param tracer: Tracer or None (defaults to opentracing.tracer) :param parent: parent Span or None :param tags: optional tags :return: new span
async def fire(self, name, payload=None, *, dc=None, node=None,
               service=None, tag=None):
    """Fire a new user event *name*; returns an object whose ID field
    uniquely identifies the fired event.

    node/service/tag are regular-expression filters; dc selects the
    datacenter (defaults to the agent's local one).
    """
    params = {
        "dc": dc,
        "node": extract_pattern(node),
        "service": extract_pattern(service),
        "tag": extract_pattern(tag),
    }
    body = encode_value(payload) if payload else None
    response = await self._api.put(
        "/v1/event/fire", name,
        data=body,
        params=params,
        headers={"Content-Type": "application/octet-stream"})
    return format_event(response.body)
Fires a new event Parameters: name (str): Event name payload (Payload): Opaque data node (Filter): Regular expression to filter by node name service (Filter): Regular expression to filter by service tag (Filter): Regular expression to filter by service tags dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: where value is event ID The return body is like:: { "ID": "b54fe110-7af5-cafc-d1fb-afc8ba432b1c", "Name": "deploy", "Payload": None, "NodeFilter": re.compile("node-\d+"), "ServiceFilter": "", "TagFilter": "", "Version": 1, "LTime": 0 } The **ID** field uniquely identifies the newly fired event.
def __get_cfg_pkgs_rpm(self):
    """Map each installed RPM package to the list of its configuration
    files, parsed from 'rpm -qa --configfiles' output."""
    out, err = self._syscall('rpm', None, None, '-qa', '--configfiles',
                             '--queryformat', '%{name}-%{version}-%{release}\\n')
    data = dict()
    current_pkg = None
    current_configs = []

    for line in salt.utils.stringutils.to_str(out).split(os.linesep):
        line = line.strip()
        if not line:
            continue
        if line.startswith("/"):
            # Path line: one config file of the current package.
            current_configs.append(line)
        else:
            # Package-name line: flush the previous package first.
            if current_pkg and current_configs:
                data[current_pkg] = current_configs
            current_pkg = line
            current_configs = []
    if current_pkg and current_configs:
        data[current_pkg] = current_configs

    return data
Get packages with configuration files on RPM systems.
def clean_before_output(kw_matches):
    """Return a copy of *kw_matches* with standalone-forbidden keyword
    matches stripped out."""
    return {match: info
            for match, info in iteritems(kw_matches)
            if not match.nostandalone}
Return a clean copy of the keywords data structure. Stripped off the standalone and other unwanted elements.
def remove_go(self, target):
    """Drop *target* from the job queue to reclaim memory while stopped;
    silently ignores targets that are not queued or a running queue."""
    with self.lock:
        if self._go:
            return
        try:
            self.job_queue.remove(target)
        except ValueError:
            pass
Remove the target from the job queue to save memory; does nothing once running (_go set) or when the target is not queued.
def get_evcodes_all(self, inc_set=None, exc_set=None):
    """Return evidence codes: the expansion of *inc_set* (or every known
    code) minus the expansion of *exc_set*."""
    if inc_set:
        codes = self._get_grps_n_codes(inc_set)
    else:
        codes = set(self.code2nt)
    if exc_set:
        codes -= self._get_grps_n_codes(exc_set)
    return codes
Get set of evidence codes given include set and exclude set
def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter):
    """Register an inotify watch on *path*, store the resulting Watch in the
    watch manager dictionary, and return its wd (negative on failure)."""
    path = self.__format_path(path)
    if auto_add and not mask & IN_CREATE:
        # auto_add needs creation events to pick up new subdirectories.
        mask |= IN_CREATE
    wd = self._inotify_wrapper.inotify_add_watch(self._fd, path, mask)
    if wd < 0:
        return wd
    watch = Watch(wd=wd, path=path, mask=mask, proc_fun=proc_fun,
                  auto_add=auto_add, exclude_filter=exclude_filter)
    self._wmd[wd] = watch
    log.debug('New %s', watch)
    return wd
Add a watch on path, build a Watch object and insert it in the watch manager dictionary. Return the wd value.
def _trace_filename(self): dir_stub = '' if self.output_directory is not None: dir_stub = self.output_directory if self.each_time: filename = '{0}_{1}.json'.format( self.output_file_name, self.counter) else: filename = '{0}.json'.format(self.output_file_name) return os.path.join(dir_stub, filename)
Creates trace filename.
def get_nodes(code, desired_type, path="__main__", mode="exec", tree=None):
    """Collect all AST nodes of *desired_type* found in *code* text.

    Keyword Arguments: path -- code path; mode -- execution mode (exec,
    eval, single); tree -- current tree, if it was optimized.
    """
    parsed = parse(code, path, mode, tree)
    return _GetVisitor(parsed, desired_type).result
Find all nodes of a given type Arguments: code -- code text desired_type -- ast Node or tuple Keyword Arguments: path -- code path mode -- execution mode (exec, eval, single) tree -- current tree, if it was optimized
def unsubscribe_url(self):
    """Absolute HTTPS URL to visit to delete this watch."""
    relative = '%s?s=%s' % (reverse('tidings.unsubscribe', args=[self.pk]),
                            self.secret)
    return 'https://%s%s' % (Site.objects.get_current().domain, relative)
Return the absolute URL to visit to delete me.
def redef(obj, key, value, **kwargs):
    """Static-constructor helper: build a Redef for *obj*.*key* with *value*."""
    return Redef(obj, key, value=value, **kwargs)
A static constructor helper function
def _qr_factor_full(a, dtype=float):
    """Compute the full pivoted QR factorization of *a* via the packed form.

    Parameters: a -- an n-by-m arraylike (m >= n); dtype -- data type for
    computations. Returns (q, r, pmut) satisfying np.dot(r, q) == a[:, pmut].
    Testing helper for _qr_factor_packed, not for production use.

    Note: the default dtype is now the builtin ``float``; the ``np.float``
    alias it used previously was removed in NumPy 1.24 (it was the same
    object, so behavior is unchanged).
    """
    n, m = a.shape

    packed, pmut, rdiag, acnorm = \
        _manual_qr_factor_packed(a, dtype)

    # Unpack R: strict lower-left data plus the separately stored diagonal.
    r = np.zeros((n, m))
    for i in range(n):
        r[i, :i] = packed[i, :i]
        r[i, i] = rdiag[i]

    # Accumulate Q from the stored Householder reflector vectors.
    q = np.eye(m)
    v = np.empty(m)
    for i in range(n):
        v[:] = packed[i]
        v[:i] = 0
        hhm = np.eye(m) - 2 * np.outer(v, v) / np.dot(v, v)
        q = np.dot(hhm, q)

    return q, r, pmut
Compute the QR factorization of a matrix, with pivoting. Parameters: a - An n-by-m arraylike, m >= n. dtype - (optional) The data type to use for computations. Default is np.float. Returns: q - An m-by-m orthogonal matrix (q q^T = ident) r - An n-by-m upper triangular matrix pmut - An n-element permutation vector The returned values will satisfy the equation np.dot(r, q) == a[:,pmut] The outputs are computed indirectly via the function _qr_factor_packed. If you need to compute q and r matrices in production code, there are faster ways to do it. This function is for testing _qr_factor_packed. The permutation vector pmut is a vector of the integers 0 through n-1. It sorts the rows of 'a' by their norms, so that the pmut[i]'th row of 'a' has the i'th biggest norm.
def deep_merge_dict(base, priority):
    """Recursively merge two dicts into a new dict; *priority* wins wherever
    the same path leads to a leaf, and non-dict values end the recursion.

    >>> deep_merge_dict({'a': 1, 'c': {'d': 4}}, {'a': {'g': 7}, 'c': 3})
    {'a': {'g': 7}, 'c': 3}
    """
    if not (isinstance(base, dict) and isinstance(priority, dict)):
        return priority
    merged = copy.deepcopy(base)
    for key, value in priority.items():
        merged[key] = deep_merge_dict(base[key], value) if key in base else value
    return merged
Recursively merges the two given dicts into a single dict. Treating base as the initial point of the resulting merged dict, and considering the nested dictionaries as trees, they are merged so: 1. Every path to every leaf in priority would be represented in the result. 2. Subtrees of base are overwritten if a leaf is found in the corresponding path in priority. 3. The invariant that all priority leaf nodes remain leafs is maintained. Parameters ---------- base : dict The first, lower-priority, dict to merge. priority : dict The second, higher-priority, dict to merge. Returns ------- dict A recursive merge of the two given dicts. Example: -------- >>> base = {'a': 1, 'b': 2, 'c': {'d': 4}, 'e': 5} >>> priority = {'a': {'g': 7}, 'c': 3, 'e': 5, 'f': 6} >>> result = deep_merge_dict(base, priority) >>> print(sorted(result.items())) [('a', {'g': 7}), ('b', 2), ('c', 3), ('e', 5), ('f', 6)]
def generate_css(self, output_file):
    """Render the CSS template (if the class defines one) and write it to
    *output_file*, which must accept bytes."""
    if self.CSS_TEMPLATE_NAME is None:
        return
    template = TEMPLATE_ENV.get_template(self.CSS_TEMPLATE_NAME)
    style = template.render(self._context())
    if isinstance(style, six.string_types):
        style = style.encode('utf-8')
    output_file.write(style)
Generate an external style sheet file. output_file must be a file handler that takes in bytes!
def get_data_size(self, sport, plan, from_day, from_month, from_year,
                  to_day, to_month, to_year, event_id=None, event_name=None,
                  market_types_collection=None, countries_collection=None,
                  file_type_collection=None, session=None):
    """Return a dict with the file count and combined size of matching files.

    Filters: sport, plan, from/to date parts, optional event id/name and
    marketTypes/countries/file-type collections; *session* is an optional
    requests session.
    """
    # locals() must be captured before any other local is bound, so that it
    # contains exactly the call parameters.
    params = clean_locals(locals())
    response, _elapsed = self.request('GetAdvBasketDataSize', params, session)
    return response
Returns a dictionary of file count and combines size files. :param sport: sport to filter data for. :param plan: plan type to filter for, Basic Plan, Advanced Plan or Pro Plan. :param from_day: day of month to start data from. :param from_month: month to start data from. :param from_year: year to start data from. :param to_day: day of month to end data at. :param to_month: month to end data at. :param to_year: year to end data at. :param event_id: id of a specific event to get data for. :param event_name: name of a specific event to get data for. :param market_types_collection: list of specific marketTypes to filter for. :param countries_collection: list of countries to filter for. :param file_type_collection: list of file types. :param requests.session session: Requests session object :rtype: dict
def Get(self):
    """Fetch this file's details and wrap them in a File object."""
    args = vfs_pb2.ApiGetFileDetailsArgs(client_id=self.client_id,
                                         file_path=self.path)
    data = self._context.SendRequest("GetFileDetails", args).file
    return File(client_id=self.client_id, data=data, context=self._context)
Fetch file's data and return proper File object.
def load_builtin_plugins() -> int:
    """Load the plugins bundled with the "nonebot" package; returns the count
    reported by load_plugins."""
    builtin_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    return load_plugins(builtin_dir, 'nonebot.plugins')
Load built-in plugins distributed along with "nonebot" package.
def digest(self):
    """Return the final digest, combining all worker-thread digests; the
    result is cached after the first call."""
    if self._digest is not None:
        return self._digest
    # Flush any buffered tail data before finalizing.
    if self._buf:
        self._add_block(self._buf)
        self._buf = EMPTY
    ctx = self._blake2s(0, 1, True)
    for worker in self._thread:
        ctx.update(worker.digest())
    self._digest = ctx.digest()
    return self._digest
Return final digest value.
def has_logs(self):
    """Return the list of log file paths under ``self.logpath``; empty when
    the path is unset or does not exist.

    :return: list
    """
    found = []
    if self.logpath is None or not os.path.exists(self.logpath):
        return found
    for root, _, files in os.walk(os.path.abspath(self.logpath)):
        found.extend(os.path.join(root, name) for name in files)
    return found
Check if log files are available and return file names if they exist. :return: list
def prepare_connection():
    """Set the default ElasticSearch connection from Django settings.

    When using multiprocessing/multithreading, call this again in each
    process/thread so every one gets its own socket instead of inheriting
    the parent's (shared sockets cause unexpected push timeouts).
    """
    host = getattr(settings, 'ELASTICSEARCH_HOST', 'localhost')
    port = getattr(settings, 'ELASTICSEARCH_PORT', 9200)
    connections.create_connection(hosts=['{}:{}'.format(host, port)])
Set dafault connection for ElasticSearch. .. warning:: In case of using multiprocessing/multithreading, connection will be probably initialized in the main process/thread and the same connection (socket) will be used in all processes/threads. This will cause some unexpected timeouts of pushes to Elasticsearch. So make sure that this function is called again in each process/thread to make sure that unique connection will be used.
def cmdloop(self):
    """Run the CLI REPL: read a line, dispatch to self.commands, and repeat
    the previous command on empty input; a truthy command return exits."""
    while True:
        tokens = shlex.split(input(self.prompt))
        if not tokens:
            if not self.last_cmd:
                print('No previous command.')
                continue
            tokens = self.last_cmd
        if tokens[0] not in self.commands:
            print('Invalid command')
            continue
        command = self.commands[tokens[0]]
        self.last_cmd = tokens
        try:
            if command(self.state, tokens):
                break
        except CmdExit:
            continue
        except Exception as e:
            # NOTE(review): this compares the exception *instance* against
            # safe_exceptions; if that list holds exception classes the test
            # can never match and every error is logged -- confirm intent.
            if e not in self.safe_exceptions:
                logger.exception('Error!')
Start CLI REPL.
def search(self):
    """Handle Grafana's search endpoint: answer CORS preflight OPTIONS
    requests, log any posted data, and return the available target queries."""
    logger.debug("Grafana search... %s", cherrypy.request.method)
    if cherrypy.request.method == 'OPTIONS':
        headers = cherrypy.response.headers
        headers['Access-Control-Allow-Methods'] = 'GET,POST,PATCH,PUT,DELETE'
        headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'
        headers['Access-Control-Allow-Origin'] = '*'
        cherrypy.request.handler = None
        return {}
    if getattr(cherrypy.request, 'json', None):
        logger.debug("Posted data: %s", cherrypy.request.json)
    logger.debug("Grafana search returns: %s", GRAFANA_TARGETS)
    return GRAFANA_TARGETS
Request available queries Posted data: {u'target': u''} Return the list of available target queries :return: See upper comment :rtype: list
def reset(self):
    """Reset all counters and min/max/sum statistics for this object."""
    self._count = 0
    self._exception_count = 0
    self._stat_start_time = None
    self._server_time_stored = False
    inf = float('inf')
    zero = float(0)
    # Every tracked quantity gets sum=0, min=+inf, max=0.
    for prefix in ('_time', '_server_time', '_request_len', '_reply_len'):
        setattr(self, prefix + '_sum', zero)
        setattr(self, prefix + '_min', inf)
        setattr(self, prefix + '_max', zero)
Reset the statistics data for this object.
def set_defaults(self, default_values, recursive=False):
    """Overlay this object's values on *default_values* and return the merge
    as a new Parameters object.

    :param default_values: Parameters with default parameter values.
    :param recursive: (optional) True for a deep copy, False for shallow.
    :return: a new Parameters object.
    """
    result = Parameters()
    if recursive:
        copier = RecursiveObjectWriter.copy_properties
    else:
        copier = ObjectWriter.set_properties
    copier(result, default_values)
    copier(result, self)
    return result
Set default values from specified Parameters and returns a new Parameters object. :param default_values: Parameters with default parameter values. :param recursive: (optional) true to perform deep copy, and false for shallow copy. Default: false :return: a new Parameters object.
def profile(self):
    """The manager's profile, fetched lazily under the mutex and cached."""
    with self._mutex:
        if not self._profile:
            raw = self._obj.get_profile()
            self._profile = utils.nvlist_to_dict(raw.properties)
        return self._profile
The manager's profile.
def solve(self):
    """Solve the cross: A*-search for the move sequence, apply it to the
    cube, and return it as a Formula."""
    start_state = (
        {face: self.cube[face] for face in "LUFDRB"},
        self.cube.select_type("edge")
        & self.cube.has_colour(self.cube["D"].colour),
    )
    result = Formula(path_actions(a_star_search(
        start_state,
        self.cross_successors,
        self.cross_state_value,
        self.cross_goal,
    )))
    self.cube(result)
    return result
Solve the cross.
def mode(keys, axis=semantics.axis_default, weights=None, return_indices=False):
    """Return the most frequently occurring key in *keys* (optionally
    weighted); with return_indices=True also return every index at which the
    mode occurs."""
    index = as_index(keys, axis)
    if weights is None:
        unique, weights = count(index)
    else:
        unique, weights = group_by(index).sum(weights)
    best = np.argmax(weights)
    _mode = unique[best]
    if not return_indices:
        return _mode
    indices = index.sorter[index.start[best]: index.stop[best]]
    return _mode, indices
compute the mode, or most frequently occurring key in a set Parameters ---------- keys : ndarray, [n_keys, ...] input array. elements of 'keys' can have arbitrary shape or dtype weights : ndarray, [n_keys], optional if given, the contribution of each key to the mode is weighted by the given weights return_indices : bool if True, return all indices such that keys[indices]==mode holds Returns ------- mode : ndarray, [...] the most frequently occurring key in the key sequence indices : ndarray, [mode_multiplicity], int, optional if return_indices is True, all indices such that points[indices]==mode holds
def get_module_names(package_path, pattern="lazy_*.py*"):
    """Sorted unique basenames (without extension) in the package directory
    that match the given glob; repeated names appear once."""
    matches = glob(os.path.join(package_path[0], pattern))
    names = {os.path.splitext(os.path.split(full)[1])[0] for full in matches}
    return sorted(names)
All names in the package directory that matches the given glob, without their extension. Repeated names should appear only once.
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
    """Check that *extensions* is a set and that every entry is a valid
    extension."""
    check_var(extensions, var_types=set, var_name='extensions')
    for extension in extensions:
        check_extension(extension, allow_multifile=allow_multifile)
Utility method to check that all extensions in the provided set are valid :param extensions: :param allow_multifile: :return:
def DeleteAttachment(self, attachment_link, options=None):
    """Delete the attachment at *attachment_link* and return the deleted
    Attachment dict; *options* are optional request options."""
    if options is None:
        options = {}
    path = base.GetPathFromLink(attachment_link)
    attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
    return self.DeleteResource(path, 'attachments', attachment_id, None,
                               options)
Deletes an attachment. :param str attachment_link: The link to the attachment. :param dict options: The request options for the request. :return: The deleted Attachment. :rtype: dict
def _sample_stratum(self, pmf=None, replace=True): if pmf is None: pmf = self.weights_ if not replace: empty = (self._n_sampled >= self.sizes_) if np.any(empty): pmf = copy.copy(pmf) pmf[empty] = 0 if np.sum(pmf) == 0: raise(RuntimeError) pmf /= np.sum(pmf) return np.random.choice(self.indices_, p = pmf)
Sample a stratum Parameters ---------- pmf : array-like, shape=(n_strata,), optional, default None probability distribution to use when sampling from the strata. If not given, use the stratum weights. replace : bool, optional, default True whether to sample with replacement Returns ------- int a randomly selected stratum index
def capture_sale(self, transaction_id, capture_amount, message=None):
    """Capture an existing preauthorized transaction; returns the status
    string from the response.

    Raises TransactionDoesNotExist on a 404 and
    SignatureValidationException when signature verification fails.
    """
    request_data = {
        "amount": self.base.convert_decimal_to_hundreds(capture_amount),
        "currency": self.currency,
        "message": message,
    }
    url = "%s%s%s/capture" % (self.api_endpoint,
                              constants.TRANSACTION_STATUS_ENDPOINT,
                              transaction_id)
    username = self.base.get_username()
    password = self.base.get_password(username=username, request_url=url)
    response = requests.put(url, json=request_data,
                            auth=HTTPBasicAuth(username=username,
                                               password=password))
    if response.status_code == 404:
        raise TransactionDoesNotExist('Wrong transaction ID!')
    if not self.base.verify_response(response.json()):
        raise SignatureValidationException('Server signature verification has failed')
    response_json = response.json()
    return response_json.get('status')
Capture existing preauth. :param transaction_id: :param capture_amount: :param message: :return: status code
def _disbatch_runner_async(self, chunk):
    """Dispatch *chunk* to the async runner client and hand back its pub
    data via tornado's coroutine Return."""
    pub_data = self.saltclients['runner'](chunk)
    raise tornado.gen.Return(pub_data)
Disbatch runner client_async commands
def write_intro(self):
    """Log the introduction banner: app info, project URLs and start time."""
    self.writeln(configuration.AppInfo)
    self.writeln(configuration.Freeware)
    self.writeln(_("Get the newest version at %(url)s")
                 % {'url': configuration.Url})
    self.writeln(_("Write comments and bugs to %(url)s")
                 % {'url': configuration.SupportUrl})
    self.writeln(_("Support this project at %(url)s")
                 % {'url': configuration.DonateUrl})
    self.check_date()
    self.writeln()
    self.writeln(_("Start checking at %s")
                 % strformat.strtime(self.starttime))
Log introduction text.
def ellplot(mjr, mnr, pa):
    """Debugging utility: plot the ellipse given major/minor axes and
    position angle."""
    _ellcheck(mjr, mnr, pa)
    import omega as om

    theta = np.linspace(0, 2 * np.pi, 200)
    x, y = ellpoint(mjr, mnr, pa, theta)
    return om.quickXY(x, y,
                      'mjr=%f mnr=%f pa=%f' % (mjr, mnr, pa * 180 / np.pi))
Utility for debugging.
def create_writer_of_type(type_name):
    """Create an instance of the writer registered under *type_name*.

    Args:
        type_name: The name of a writer.
    Returns:
        An instance of the writer with the given type.
    Raises:
        UnknownWriterException: for an unregistered name.
    """
    writers = available_writers()
    # Dict membership test directly; the former `.keys()` call was redundant.
    if type_name not in writers:
        raise UnknownWriterException('Unknown writer: %s' % (type_name,))
    return writers[type_name]()
Create an instance of the writer with the given name. Args: type_name: The name of a writer. Returns: An instance of the writer with the given type.
def parse(self, nodes):
    """Feed each node through the machine's graph, starting from the initial
    node type; any failure is re-raised with the offending node's ordinal."""
    self.last_node_type = self.initial_node_type
    for position, node in enumerate(nodes):
        try:
            self.step(node)
        except Exception as ex:
            raise Exception(
                "An error occurred on node {}".format(position)) from ex
Given a stream of node data, try to parse the nodes according to the machine's graph.
def get_custom_annotations_recursive(data_type):
    """Given a Stone data type, returns all custom annotations applied to any
    of its members, as well as submembers, ..., to an arbitrary depth.

    :param data_type: a Stone data type (possibly an alias).
    :returns: generator yielding the custom annotation objects found.
    """
    # Stone types can reference themselves (or otherwise be cyclical), so
    # track which types have already been visited to avoid infinite recursion.
    data_types_seen = set()
    def recurse(data_type):
        if data_type in data_types_seen:
            return
        data_types_seen.add(data_type)
        dt, _, _ = unwrap(data_type)
        if is_struct_type(dt) or is_union_type(dt):
            for field in dt.fields:
                # First annotations reachable through the field's own type,
                # then annotations attached directly to the field.
                for annotation in recurse(field.data_type):
                    yield annotation
                for annotation in field.custom_annotations:
                    yield annotation
        elif is_list_type(dt):
            for annotation in recurse(dt.data_type):
                yield annotation
        elif is_map_type(dt):
            # Only the value type is traversed here; map keys are not.
            for annotation in recurse(dt.value_data_type):
                yield annotation
        # Aliases may themselves carry custom annotations.
        for annotation in get_custom_annotations_for_alias(data_type):
            yield annotation
    return recurse(data_type)
def fit(self, X, y=None):
    """Fit the model on the training data.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    y : array-like, shape (n_samples,)

    Returns
    -------
    self : object
    """
    X = self._check_array(X)
    solver = algorithms._solvers[self.solver]
    self._coef = solver(X, y, **self._get_solver_kwargs())
    if self.fit_intercept:
        # When fitting an intercept, the last coefficient is the bias term.
        self.coef_ = self._coef[:-1]
        self.intercept_ = self._coef[-1]
    else:
        self.coef_ = self._coef
    return self
def process_nxml_file(fname, output_fmt='json', outbuf=None, cleanup=True,
                      **kwargs):
    """Return processor with Statements extracted by reading an NXML file.

    Parameters
    ----------
    fname : str
        The path to the NXML file to be read.
    output_fmt : Optional[str]
        The output format to obtain from Sparser, either 'json' or 'xml'.
        Default: 'json'
    outbuf : Optional[file]
        A file-like object that the Sparser output is written to.
    cleanup : Optional[bool]
        If True, the output file created by Sparser is removed.
        Default: True

    Returns
    -------
    sp : SparserXMLProcessor or SparserJSONProcessor depending on the chosen
        output format, or None if Sparser failed.
    """
    sp = None
    out_fname = None
    try:
        out_fname = run_sparser(fname, output_fmt, outbuf, **kwargs)
        sp = process_sparser_output(out_fname, output_fmt)
    except Exception as e:
        # Sparser failures are logged rather than propagated; the caller
        # receives None in that case.
        logger.error("Sparser failed to run on %s." % fname)
        logger.exception(e)
    finally:
        if cleanup and out_fname is not None and os.path.exists(out_fname):
            os.remove(out_fname)
    return sp
def get_tokens(condition):
    """Get AST tokens for a Python condition.

    Returns:
        list: list of AST tokens (empty when the condition fails to parse)
    """
    try:
        tree = ast.parse(condition.strip())
    except SyntaxError as exception:
        Logger.get_logger(__name__).error("Syntax error: %s", exception)
        return []
    # ast.walk over an already-parsed tree cannot raise SyntaxError.
    return list(ast.walk(tree))
def checkout_dirs(self):
    """Return directories inside the base directory."""
    base = self.base_directory
    candidates = (os.path.join(base, name) for name in os.listdir(base))
    return [path for path in candidates if os.path.isdir(path)]
def drag_and_drop(self, source_selector, destination_selector, **kwargs):
    """Drag and drop the source element onto the destination element.

    Args:
        source_selector: (str) selector of the element to drag
        destination_selector: (str) selector of the drop target

    Kwargs:
        use_javascript_dnd: bool; default: config proxy_driver:use_javascript_dnd

    Raises:
        re-raises any exception from the underlying drag-and-drop attempt
        after logging it.
    """
    self.info_log(
        "Drag and drop: source (%s); destination (%s)"
        % (source_selector, destination_selector)
    )
    # NOTE(review): the default here is the literal config-key string, which
    # is always truthy — presumably it should be resolved against the config
    # instead; as written the JS path is taken by default. TODO confirm.
    use_javascript_dnd = kwargs.get(
        "use_javascript_dnd", "proxy_driver:use_javascript_dnd"
    )
    source_el = self.find(source_selector)
    destination_el = self.find(destination_selector)
    if use_javascript_dnd:
        # Synthesize mousedown/mousemove/mouseup events in the page itself;
        # used where native ActionChains drag-and-drop is unreliable.
        try:
            dnd_script = [
                "function simulate(f,c,d,e){var b,a=null;for(b in eventMatchers)if(eventMatchers[b].test(c)){a=b;break}if(!a)return!1;document.createEvent?(b=document.createEvent(a),a=='HTMLEvents'?b.initEvent(c,!0,!0):b.initMouseEvent(c,!0,!0,document.defaultView,0,d,e,d,e,!1,!1,!1,!1,0,null),f.dispatchEvent(b)):(a=document.createEventObject(),a.detail=0,a.screenX=d,a.screenY=e,a.clientX=d,a.clientY=e,a.ctrlKey=!1,a.altKey=!1,a.shiftKey=!1,a.metaKey=!1,a.button=1,f.fireEvent('on'+c,a));return!0} var eventMatchers={HTMLEvents:/^(?:load|unload|abort|error|select|change|submit|reset|focus|blur|resize|scroll)$/,MouseEvents:/^(?:click|dblclick|mouse(?:down|up|over|move|out))$/};",
                "var source = arguments[0],destination = arguments[1];",
                "simulate(source, 'mousedown', 0, 0);",
                "simulate(source, 'mousemove', destination.offsetLeft, destination.offsetTop);",
                "simulate(source, 'mouseup', destination.offsetLeft, destination.offsetTop);"
            ]
            self._driver.execute_script(
                '\n'.join(dnd_script),
                source_el._element,
                destination_el._element
            )
        except Exception as e:
            self.error_log(u'drag_and_drop exception: %s' % str(e))
            raise
    else:
        # Fall back to Selenium's native drag-and-drop action.
        try:
            ActionChains(self._driver).drag_and_drop(
                source_el, destination_el
            ).perform()
        except Exception as e:
            self.error_log(u'drag_and_drop exception: %s' % str(e))
            raise
def close_filenos(preserve):
    """Close unprotected file descriptors.

    Close all open file descriptors that are not in *preserve*. If the
    RLIMIT_NOFILE hard limit is unlimited, descriptors 0..4095 are swept
    instead of the reported limit.

    :param preserve: set with protected file descriptors
    :type preserve: set
    :return: None
    :raises DaemonError: if closing a descriptor fails for a reason other
        than it not being open.
    """
    _, maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 4096
    for fileno in range(maxfd):
        if fileno in preserve:
            continue
        try:
            os.close(fileno)
        except OSError as err:
            # EBADF simply means the descriptor was not open; anything
            # else is a genuine failure.
            if err.errno != errno.EBADF:
                raise DaemonError(
                    'Failed to close file descriptor {0}: {1}'
                    .format(fileno, err))
def _create_field_mapping_action(self):
    """Create action for showing the field mapping dialog.

    The action is created disabled and wired to ``show_field_mapping``;
    it is then registered on the full toolbar via ``add_action``.
    """
    icon = resources_path('img', 'icons', 'show-mapping-tool.svg')
    self.action_field_mapping = QAction(
        QIcon(icon),
        self.tr('InaSAFE Field Mapping Tool'),
        self.iface.mainWindow())
    self.action_field_mapping.setStatusTip(self.tr(
        'Assign field mapping to layer.'))
    self.action_field_mapping.setWhatsThis(self.tr(
        'Use this tool to assign field mapping in layer.'))
    # Disabled until a suitable layer is active elsewhere in the plugin.
    self.action_field_mapping.setEnabled(False)
    self.action_field_mapping.triggered.connect(self.show_field_mapping)
    self.add_action(
        self.action_field_mapping,
        add_to_toolbar=self.full_toolbar)
def OnCardRightClick(self, event):
    """Called when user right-clicks a node in the card tree control.

    Shows a context menu offering "Connect" or "Disconnect" for the
    clicked card, depending on whether it already has a connection.
    """
    item = event.GetItem()
    if item:
        itemdata = self.readertreepanel.cardtreectrl.GetItemPyData(item)
        if isinstance(itemdata, smartcard.Card.Card):
            self.selectedcard = itemdata
            # Lazily create the menu item ids and event bindings on the
            # first right-click; reuse them afterwards.
            if not hasattr(self, "connectID"):
                self.connectID = wx.NewId()
                self.disconnectID = wx.NewId()
                self.Bind(wx.EVT_MENU, self.OnConnect, id=self.connectID)
                self.Bind(
                    wx.EVT_MENU, self.OnDisconnect, id=self.disconnectID)
            menu = wx.Menu()
            # Offer the action that makes sense for the card's current state.
            if not hasattr(self.selectedcard, 'connection'):
                menu.Append(self.connectID, "Connect")
            else:
                menu.Append(self.disconnectID, "Disconnect")
            self.PopupMenu(menu)
            menu.Destroy()
def remove_column(self, column_name, inplace=False):
    """Removes the column with the given name from the SFrame.

    If inplace == False (default) this operation does not modify the
    current SFrame, returning a new SFrame.
    If inplace == True, this operation modifies the current SFrame,
    returning self.

    Parameters
    ----------
    column_name : string
        The name of the column to remove.

    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Raises
    ------
    KeyError
        If ``column_name`` is not a column of this frame.
    """
    if column_name not in self.column_names():
        raise KeyError('Cannot find column %s' % column_name)
    if inplace:
        self.__is_dirty__ = True
        try:
            with cython_context():
                # Reserved id columns can never be removed from either frame.
                if self._is_vertex_frame():
                    assert column_name != '__id', 'Cannot remove \"__id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_vertex_field(column_name)
                    self.__graph__.__proxy__ = graph_proxy
                elif self._is_edge_frame():
                    assert column_name != '__src_id', 'Cannot remove \"__src_id\" column'
                    assert column_name != '__dst_id', 'Cannot remove \"__dst_id\" column'
                    graph_proxy = self.__graph__.__proxy__.delete_edge_field(column_name)
                    self.__graph__.__proxy__ = graph_proxy
            return self
        except:
            # Roll back the dirty flag on any failure before re-raising.
            self.__is_dirty__ = False
            raise
    else:
        # Non-inplace removal is delegated to the parent SFrame implementation.
        return super(GFrame, self).remove_column(column_name, inplace=inplace)
def _two_qubit_accumulate_into_scratch(args: Dict[str, Any]):
    """Accumulates two qubit phase gates into the scratch shards."""
    index0, index1 = args['indices']
    scratch = _scratch_shard(args)
    # Projector onto the |11> subspace of the two target qubits.
    both_ones = _one_projector(args, index0) * _one_projector(args, index1)
    # In-place update so the shared scratch shard observes the change.
    scratch += 2 * args['half_turns'] * both_ones
def inserir(self, id_brand, name):
    """Inserts a new Model and returns its identifier.

    :param id_brand: Identifier of the Brand. Integer value greater than zero.
    :param name: Model name. String of 3 to 100 characters.

    :return: Dictionary with the following structure:

    ::

        {'model': {'id': < id_model >}}

    :raise InvalidParameterError: The identifier of Brand or name is null or invalid.
    :raise NomeMarcaModeloDuplicadoError: A Model with this name and brand already exists.
    :raise MarcaNaoExisteError: Brand not registered.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    payload = {'model': {'name': name, 'id_brand': id_brand}}
    code, xml = self.submit(payload, 'POST', 'model/')
    return self.response(code, xml)
def to_array(self, variables):
    """Converts the clamping to a 1-D array with respect to the given variables.

    Parameters
    ----------
    variables : list[str]
        List of variable names.

    Returns
    -------
    numpy.ndarray
        1-D int8 array where position `i` holds the sign of the clamped
        variable at position `i` in `variables` (0 when not clamped).
    """
    signs = dict(self)
    return np.array([signs.get(var, 0) for var in variables], dtype=np.int8)
def compare(left: Union[str, pathlib.Path, _Entity], right: Union[str, pathlib.Path, _Entity]) -> Comparison:
    """Compare two paths.

    :param left: The left side or "before" entity.
    :param right: The right side or "after" entity.
    :return: A comparison detailing what has changed from the left side to
        the right side.
    """
    def normalise(param):
        # Promote str -> Path -> _Entity as needed; an _Entity passes through.
        if isinstance(param, str):
            param = pathlib.Path(param)
        if isinstance(param, pathlib.Path):
            param = _Entity.from_path(param)
        return param

    return Comparison.compare(normalise(left), normalise(right))
def check_move(self, move_type, move_x, move_y):
    """Validate a move and build its description.

    Raises ValueError when the move type or board position is invalid;
    otherwise increments the move counter and returns a dict describing
    the move.

    Parameters
    ----------
    move_type : string
        one of four move types: "click", "flag", "unflag", "question"
    move_x : int
        X position of the move
    move_y : int
        Y position of the move
    """
    if move_type not in self.move_types:
        raise ValueError("This is not a valid move!")
    if not (0 <= move_x < self.board_width):
        raise ValueError("This is not a valid X position of the move!")
    if not (0 <= move_y < self.board_height):
        raise ValueError("This is not a valid Y position of the move!")
    self.num_moves += 1
    return {"move_type": move_type, "move_x": move_x, "move_y": move_y}
def defaults(cls, *options, **kwargs):
    """Set default options for a session.

    Set default options for a session, whether in a Python script or a
    Jupyter notebook.

    Args:
        *options: Option objects used to specify the defaults.
        backend: The plotting extension the options apply to
    """
    # Reject anything other than a single, optional 'backend' keyword.
    # The original check chained the conditions with `and`, which let a
    # single misnamed keyword (or extra keywords alongside 'backend')
    # slip through silently instead of raising.
    if kwargs and (len(kwargs) != 1 or 'backend' not in kwargs):
        raise Exception('opts.defaults only accepts "backend" keyword argument')
    cls._linemagic(cls._expand_options(merge_options_to_dict(options)),
                   backend=kwargs.get('backend'))
def ancestor_of(self, name, ancestor, visited=None):
    """Check whether a node has another node as an ancestor.

    name: The name of the node being checked.
    ancestor: The name of the (possible) ancestor node.
    visited: (optional, None) If given, a set of nodes that have already
        been traversed.
        NOTE: The set will be updated with any new nodes that are visited.
    NOTE: If the node doesn't exist, the method will return False.
    """
    visited = set() if visited is None else visited
    start = self._nodes.get(name)
    if start is None or name not in self._nodes:
        return False
    # Iterative depth-first walk up the parent links.
    pending = list(start.parents)
    while pending:
        current = pending.pop()
        if current == ancestor:
            return True
        if current in visited:
            continue
        visited.add(current)
        parent_node = self._nodes.get(current)
        if parent_node is not None:
            pending.extend(parent_node.parents)
    return False
def has_gap_in_elf_shndx(self):
    """Return the has-gap-in-elf-shndx attribute of the BFD file being
    processed.

    :raises BfdException: if the underlying BFD pointer is not initialized.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")
    return _bfd.get_bfd_attribute(
        self._ptr, BfdAttributes.HAS_GAP_IN_ELF_SHNDX)
def dependencies(project_name):
    """Get the dependencies for a project.

    Returns a set of canonicalized dependency names, or None when the
    project cannot be located.
    """
    log = logging.getLogger('ciu')
    log.info('Locating dependencies for {}'.format(project_name))
    located = distlib.locators.locate(project_name, prereleases=True)
    if not located:
        log.warning('{0} not found'.format(project_name))
        return None
    names = set()
    for dep in located.run_requires:
        names.add(packaging.utils.canonicalize_name(pypi.just_name(dep)))
    return names
def get_auth_url(self, app_id, canvas_url, perms=None, **kwargs):
    """Build a URL to create an OAuth dialog."""
    base_url = "{0}{1}/{2}".format(
        FACEBOOK_WWW_URL, self.version, FACEBOOK_OAUTH_DIALOG_PATH
    )
    # Insertion order matters for the resulting query string; extra
    # kwargs go last and may override the defaults.
    query = {"client_id": app_id, "redirect_uri": canvas_url}
    if perms:
        query["scope"] = ",".join(perms)
    query.update(kwargs)
    return base_url + urlencode(query)
def format_platforms(cls, platforms):
    """Formats supported platforms in human readable form."""
    if not platforms:
        return []
    header = 'This DAP is only supported on the following platforms:'
    return [header] + [' * ' + platform for platform in platforms]
def influx_query_(self, q):
    """Runs an Influx db query.

    Returns the query result, or None when no database is connected or
    the query fails (errors are reported through ``self.err``).
    """
    if self.influx_cli is None:
        msg = "No database connected. Please initialize a connection"
        self.err(self.influx_query_, msg)
        return None
    try:
        result = self.influx_cli.query(q)
    except Exception as e:
        self.err(e, self.influx_query_, "Can not query database")
        return None
    return result
def register_parser(parser, subparsers_action=None, categories=('other', ), add_help=True):
    """Attaches `parser` to the global ``parser_map``.

    If `add_help` is truthy, then adds the helpstring of `parser` into the
    output of ``dx help...``, for each category in `categories`.

    :param subparsers_action: A special action object that is returned by
        ``ArgumentParser.add_subparsers(...)``, or None (falls back to the
        module-level ``subparsers``).
    :type subparsers_action: argparse._SubParsersAction, or None.
    """
    # Command name is the parser's prog with the leading "dx " removed.
    name = re.sub('^dx ', '', parser.prog)
    if subparsers_action is None:
        subparsers_action = subparsers
    # Accept a single category name as shorthand for a one-element tuple.
    if isinstance(categories, basestring):
        categories = (categories, )
    parser_map[name] = parser
    if add_help:
        # The helpstring of the most recently added subparser.
        _help = subparsers_action._choices_actions[-1].help
        parser_categories['all']['cmds'].append((name, _help))
        for category in categories:
            parser_categories[category]['cmds'].append((name, _help))
def validate_driver_or_none(option, value):
    """Validate the driver keyword arg."""
    if value is None:
        return None
    if isinstance(value, DriverInfo):
        return value
    raise TypeError("%s must be an instance of DriverInfo" % (option,))
def gpg_key(value):
    """Test if value points to a known gpg key and return that key as a
    gpg key object.

    :param value: identifier of the key to look up.
    :raises ValidateError: if the key lookup fails (wraps the GPGProblem
        message).
    """
    try:
        return crypto.get_key(value)
    except GPGProblem as e:
        raise ValidateError(str(e))
def get_data(n_samples=100):
    """Get synthetic classification data with ``n_samples`` samples."""
    X, y = make_classification(
        n_samples=n_samples,
        n_features=N_FEATURES,
        n_classes=N_CLASSES,
        random_state=0,
    )
    # Downcast features to float32 for the consumers of this data.
    return X.astype(np.float32), y
def normalize(body_part_tup,):
    """Normalize a tuple of BodyPart objects to a string.

    Normalization is done by sorting the body_parts by the
    Content-Disposition headers, which are typically of the form
    ``form-data; name="name_of_part"``.
    """
    def disposition(part):
        return part.headers[b'Content-Disposition']

    chunks = []
    for part in sorted(body_part_tup, key=disposition):
        header_text = str(disposition(part), part.encoding)
        chunks.append('{}\n\n{}'.format(header_text, part.text))
    return '\n\n'.join(chunks)
def disable_insecure_request_warning():
    """Suppress warning about untrusted SSL certificate."""
    # Imported lazily so the module loads even when requests is absent.
    from requests.packages import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def capture_url_missing_namespace(self, node):
    """Capture missing namespace in url include.

    Inspects the call's positional args for an ``include(...)`` call; the
    first one found decides the outcome: None when it carries a
    ``namespace`` keyword, otherwise a DJ05 issue.
    """
    for arg in node.args:
        is_include_call = (
            isinstance(arg, ast.Call)
            and isinstance(arg.func, ast.Name)
            and arg.func.id == 'include'
        )
        if not is_include_call:
            continue
        if any(keyword.arg == 'namespace' for keyword in arg.keywords):
            return None
        return DJ05(
            lineno=node.lineno,
            col=node.col_offset,
        )
def wrap_callback(function):
    """Wrap a callback so its result is stored as ``task._callback_result``."""
    @wraps(function)
    def wrapped(task):
        result = function(task)
        task._callback_result = result
        return result
    return wrapped
def add_standard_attention_hparams(hparams):
    """Adds the hparams used by get_standardized_layers."""
    hparam_defaults = (
        # Attention layer params.
        ("num_heads", 8),
        ("attention_key_channels", 0),
        ("attention_value_channels", 0),
        ("attention_dropout", 0.0),
        ("attention_loc_block_length", 256),
        ("attention_loc_block_width", 128),
        ("attention_red_factor", 3),
        ("attention_red_type", "conv"),
        ("attention_red_nonlinearity", "none"),
        # Feed-forward layer params.
        ("filter_size", 2048),
        ("relu_dropout", 0.0),
    )
    for hparam_name, default_value in hparam_defaults:
        hparams.add_hparam(hparam_name, default_value)
    return hparams