code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def to_string(self, obj):
    """Coerce *obj* to a string.

    Iterables are converted element-wise and joined with commas; a
    non-iterable object is passed to ``str`` directly.
    """
    try:
        parts = [str(item) for item in obj]
    except TypeError:
        # not iterable: fall back to a plain str() coercion
        return str(obj)
    return ','.join(parts)
Picks up an object and transforms it into a string, by coercing each element in an iterable to a string and then joining them, or by trying to coerce the object directly
def color(nickname):
    """Return a stable ``rgb(...)`` CSS color for *nickname*.

    Uses the first 6 hex chars of the nickname's MD5 hash as an RGB
    triplet, darkening each channel (x0.7) so the color reads well on a
    light background.
    """
    if isinstance(nickname, str):
        # hashlib.md5 requires bytes on Python 3
        nickname = nickname.encode('utf-8')
    _hex = md5(nickname).hexdigest()[:6]

    def darken(s):
        return str(int(round(int(s, 16) * .7)))

    return "rgb(%s)" % ",".join(darken(_hex[i:i + 2]) for i in range(0, 6, 2))
Provides a consistent color for a nickname. Uses first 6 chars of nickname's md5 hash, and then slightly darkens the rgb values for use on a light background.
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
    """POST *ddata* to *url* and refresh this namespace on success.

    The Referer header is set to *referer* (dropped entirely when it is
    None), and a CSRF token from the parent session is injected when the
    payload does not already carry one.  ``self.update()`` runs only on
    an HTTP 200 response.
    """
    headers = HEADERS.copy()
    if referer is None:
        headers.pop('Referer')
    else:
        headers['Referer'] = referer
    if 'csrfmiddlewaretoken' not in ddata.keys():
        ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
    req = self._parent.client.post(url, headers=headers, data=ddata)
    if req.status_code == 200:
        self.update()
Method to update some attributes on namespace.
def int_check(*args, func=None):
    """Raise ComplexError unless every argument is an integral number.

    *func* names the calling function for the error message; when falsy
    it is taken from the call stack.
    """
    caller = func or inspect.stack()[2][3]
    for value in args:
        if isinstance(value, numbers.Integral):
            continue
        type_name = type(value).__name__
        raise ComplexError(
            f'Function {caller} expected integral number, {type_name} got instead.')
Check whether all arguments are integral numbers.
def stem_singular_word(self, word):
    """Stem a singular word to its common stem form."""
    context = Context(word, self.dictionary, self.visitor_provider)
    context.execute()
    return context.result
Stem a singular word to its common stem form.
def index_missing_documents(self, documents, request=None):
    """Index documents that are missing from the ES index.

    Uses an ES ``mget`` on the ``_pk`` ids of *documents* to determine
    which are already indexed, then bulk-indexes only the missing ones.
    """
    log.info('Trying to index documents of type `{}` missing from '
             '`{}` index'.format(self.doc_type, self.index_name))
    if not documents:
        log.info('No documents to index')
        return
    query_kwargs = dict(
        index=self.index_name,
        doc_type=self.doc_type,
        fields=['_id'],
        body={'ids': [d['_pk'] for d in documents]},
    )
    try:
        response = self.api.mget(**query_kwargs)
    except IndexNotFoundException:
        # a missing index means nothing is indexed yet
        indexed_ids = set()
    else:
        indexed_ids = set(
            d['_id'] for d in response['docs'] if d.get('found'))
    documents = [d for d in documents if str(d['_pk']) not in indexed_ids]
    if not documents:
        log.info('No documents of type `{}` are missing from '
                 'index `{}`'.format(self.doc_type, self.index_name))
        return
    self._bulk('index', documents, request)
Index documents that are missing from ES index. Determines which documents are missing using ES `mget` call which returns a list of document IDs as `documents`. Then missing `documents` from that list are indexed.
def _print_err(*args):
    """Print error output (Python 2 code: print statement, sys.exc_traceback).

    No-op unless ``CFG.debug`` is set or *args* is empty.  Each message
    is prefixed with a timestamp and the calling module/function name.
    *args*: list of printing contents.
    """
    if not CFG.debug:
        return
    if not args:
        return
    # gbk on non-POSIX (Windows) consoles, utf8 elsewhere
    encoding = 'utf8' if os.name == 'posix' else 'gbk'
    args = [_cs(a, encoding) for a in args]
    f_back = None
    try:
        raise Exception
    except:
        # raise/catch to grab the caller's frame (Python 2 idiom)
        f_back = sys.exc_traceback.tb_frame.f_back
    f_name = f_back.f_code.co_name
    filename = os.path.basename(f_back.f_code.co_filename)
    m_name = os.path.splitext(filename)[0]
    prefix = ('[%s.%s]' % (m_name, f_name)).ljust(20, ' ')
    print bcolors.FAIL+'[%s]'%str(datetime.datetime.now()), prefix, ' '.join(args) + bcolors.ENDC
Print errors. *args list, list of printing contents
def iss_spi_divisor(self, sck):
    """Calculate a USBISS SPI divisor value from the SPI clock speed.

    :param sck: SPI clock frequency in Hz
    :type sck: int
    :returns: ISS SCK divisor
    :rtype: int
    :raises ValueError: when the divisor is non-integer or out of range
    """
    _divisor = (6000000 / sck) - 1
    divisor = int(_divisor)
    if divisor != _divisor:
        raise ValueError('Non-integer SCK divisor.')
    # valid divisors are 1..255; the message now matches the check
    if not 1 <= divisor < 256:
        error = (
            "The value of sck_divisor, {}, "
            "is not between 1 and 255".format(divisor)
        )
        raise ValueError(error)
    return divisor
Calculate a USBISS SPI divisor value from the input SPI clock speed :param sck: SPI clock frequency :type sck: int :returns: ISS SCK divisor :rtype: int
def _get_history_minute_window(self, assets, end_dt, bar_count,
                               field_to_use):
    """Return a DataFrame of *bar_count* minute bars of *field_to_use*
    ending at *end_dt*, indexed by minute with one column per asset."""
    try:
        minutes_for_window = self.trading_calendar.minutes_window(
            end_dt, -bar_count
        )
    except KeyError:
        self._handle_minute_history_out_of_bounds(bar_count)
    # window would start before trading data begins
    if minutes_for_window[0] < self._first_trading_minute:
        self._handle_minute_history_out_of_bounds(bar_count)
    asset_minute_data = self._get_minute_window_data(
        assets,
        field_to_use,
        minutes_for_window,
    )
    return pd.DataFrame(
        asset_minute_data,
        index=minutes_for_window,
        columns=assets
    )
Internal method that returns a dataframe containing history bars of minute frequency for the given sids.
def _get_value_from_match(self, key, match): value = match.groups(1)[0] clean_value = str(value).lstrip().rstrip() if clean_value == 'true': self._log.info('Got value of "%s" as boolean true.', key) return True if clean_value == 'false': self._log.info('Got value of "%s" as boolean false.', key) return False try: float_value = float(clean_value) self._log.info('Got value of "%s" as float "%f".', key, float_value) return float_value except ValueError: self._log.info('Got value of "%s" as string "%s".', key, clean_value) return clean_value
Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean.
def get(cls, scope=None):
    """Return default or predefined URLs from a keyword, or pass
    *scope* through unchanged when it is not a known keyword."""
    if scope is None:
        scope = cls.default
    if isinstance(scope, string_types) and scope in cls._keywords:
        return getattr(cls, scope)
    return scope
Return default or predefined URLs from keyword, pass through ``scope``.
def load(self, carddict):
    """Restore this card's attributes from a dict produced by ``Card.save``.

    String-encoded fields are revived with ``eval``.
    SECURITY WARNING: ``eval`` executes arbitrary code -- only load card
    dicts from trusted sources.
    """
    self.code = carddict["code"]
    if isinstance(self.code, text_type):
        self.code = eval(self.code)
    self.name = carddict["name"]
    self.abilities = carddict["abilities"]
    if isinstance(self.abilities, text_type):
        self.abilities = eval(self.abilities)
    self.attributes = carddict["attributes"]
    if isinstance(self.attributes, text_type):
        self.attributes = eval(self.attributes)
    self.info = carddict["info"]
    if isinstance(self.info, text_type):
        self.info = eval(self.info)
    return self
Takes a carddict as produced by ``Card.save`` and sets this card instances information to the previously saved cards information.
def list(self, cart_glob=None):
    """List all carts whose names match the glob patterns.

    :param cart_glob: sequence of glob patterns (``.json`` is appended
        when missing); defaults to ``['*.json']``.
    :returns: list of cart names (file names without the extension)
    """
    # avoid a mutable default argument
    if cart_glob is None:
        cart_glob = ['*.json']
    carts = []
    for pattern in cart_glob:
        # allow trailing ".json" but don't require it
        if not pattern.endswith('.json'):
            search_glob = pattern + ".json"
        else:
            search_glob = pattern
        for cart in juicer.utils.find_pattern(Constants.CART_LOCATION,
                                              search_glob):
            cart_name = cart.split('/')[-1].replace('.json', '')
            carts.append(cart_name)
    return carts
List all carts
def get_weights(self, data, F):
    """Solve for the weight matrix mapping factors F onto fMRI *data*.

    Parameters
    ----------
    data : 2D array, shape [n_voxel, n_tr]
        fMRI data from one subject.
    F : 2D array, shape [n_voxel, self.K]
        Latent factors.

    Returns
    -------
    W : 2D array, shape [K, n_tr]
        The weight matrix.
    """
    trans_F = F.T.copy()
    if self.weight_method == 'rr':
        # ridge regression, regularized by the data variance
        beta = np.var(data)
        W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
                            trans_F.dot(data))
    else:
        # ordinary least squares
        W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
    return W
Calculate weight matrix based on fMRI data and factors Parameters ---------- data : 2D array, with shape [n_voxel, n_tr] fMRI data from one subject F : 2D array, with shape [n_voxel,self.K] The latent factors from fMRI data. Returns ------- W : 2D array, with shape [K, n_tr] The weight matrix from fMRI data.
def key_description(self):
    "Return a description of the key"
    vk, scan, flags = self._get_key_info()
    desc = ''
    if vk:
        # known virtual-key code -> symbolic name, else numeric form
        if vk in CODE_NAMES:
            desc = CODE_NAMES[vk]
        else:
            desc = "VK %d"% vk
    else:
        desc = "%s"% self.key
    return desc
Return a description of the key
def _sumterm_prime(lexer):
    """Parse a sum-term' production (eliminates left recursion).

    Returns an ('or', xorterm, rest) tuple or a bare xorterm; returns
    None (after pushing the token back) when the next token is not OP_or.
    """
    tok = next(lexer)
    if isinstance(tok, OP_or):
        xorterm = _xorterm(lexer)
        sumterm_prime = _sumterm_prime(lexer)
        if sumterm_prime is None:
            return xorterm
        else:
            return ('or', xorterm, sumterm_prime)
    else:
        lexer.unpop_token(tok)
        return None
Return a sum term' expression, eliminates left recursion.
def module(command, *args):
    """Run the Environment Modules ``modulecmd`` tool and apply its
    Python-formatted output to set environment variables.

    Skips (with a warning) when MODULESHOME is not set.
    """
    if 'MODULESHOME' not in os.environ:
        print('payu: warning: No Environment Modules found; skipping {0} call.'
              ''.format(command))
        return
    modulecmd = ('{0}/bin/modulecmd'.format(os.environ['MODULESHOME']))
    cmd = '{0} python {1} {2}'.format(modulecmd, command, ' '.join(args))
    envs, _ = subprocess.Popen(shlex.split(cmd),
                               stdout=subprocess.PIPE).communicate()
    # modulecmd emits Python code that mutates os.environ; exec applies it
    exec(envs)
Run the modulecmd tool and use its Python-formatted output to set the environment variables.
def removeDefaultAttributeValue(node, attribute):
    """Remove *attribute* from *node* when it carries its default value
    and the attribute's conditions (if any) are fulfilled.

    Returns 1 when the attribute was removed, 0 otherwise.
    Warning: does NOT check that the attribute is actually valid for the
    element type, for increased performance.
    """
    if not node.hasAttribute(attribute.name):
        return 0
    if isinstance(attribute.value, str):
        # plain string default: exact text comparison
        if node.getAttribute(attribute.name) == attribute.value:
            if (attribute.conditions is None) or attribute.conditions(node):
                node.removeAttribute(attribute.name)
                return 1
    else:
        # numeric default: compare parsed SVG length value and units
        nodeValue = SVGLength(node.getAttribute(attribute.name))
        if ((attribute.value is None) or
                ((nodeValue.value == attribute.value) and
                 not (nodeValue.units == Unit.INVALID))):
            if ((attribute.units is None) or
                    (nodeValue.units == attribute.units) or
                    (isinstance(attribute.units, list) and
                     nodeValue.units in attribute.units)):
                if (attribute.conditions is None) or attribute.conditions(node):
                    node.removeAttribute(attribute.name)
                    return 1
    return 0
Removes the DefaultAttribute 'attribute' from 'node' if the specified conditions are fulfilled. Warning: Does NOT check if the attribute is actually valid for the passed element type, for increased performance!
def IOC_TYPECHECK(t):
    """Return the size of ctypes type *t*, checking its suitability for
    use in an ioctl command number (must fit the ioctl size field)."""
    result = ctypes.sizeof(t)
    # assert, not raise: an oversized type is a programming error
    assert result <= _IOC_SIZEMASK, result
    return result
Returns the size of given type, and check its suitability for use in an ioctl command number.
def _options_request(self, url, **kwargs): request_kwargs = { 'method': 'OPTIONS', 'url': url } for key, value in kwargs.items(): request_kwargs[key] = value return self._request(**request_kwargs)
a method to catch and report http options request connectivity errors
def gc(self):
    """Return the frequency of G and C bases in the current sequence.

    Returns 0.0 for an empty sequence instead of raising
    ZeroDivisionError.
    """
    length = len(self)
    if length == 0:
        return 0.0
    gc_count = sum(1 for base in self.seq if base == 'C' or base == 'G')
    return float(gc_count) / length
Find the frequency of G and C in the current sequence.
def goal(self, goal_name, count=1):
    """Record that this user performed *goal_name* (*count* times),
    updating goal stats for every experiment the user is enrolled in
    that is currently displaying alternatives."""
    for enrollment in self._get_all_enrollments():
        if enrollment.experiment.is_displaying_alternatives():
            self._experiment_goal(enrollment.experiment,
                                  enrollment.alternative, goal_name, count)
Record that this user has performed a particular goal This will update the goal stats for all experiments the user is enrolled in.
def rgevolve_leadinglog(self, scale_out):
    """Compute the leading-log approximation to the SMEFT RGE solution
    from the initial scale to *scale_out*.

    Returns a dictionary with parameters and Wilson coefficients.
    Much faster but less precise than ``rgevolve``.
    """
    self._check_initial()
    return rge.smeft_evolve_leadinglog(C_in=self.C_in,
                                       scale_high=self.scale_high,
                                       scale_in=self.scale_in,
                                       scale_out=scale_out)
Compute the leading logarithmic approximation to the solution of the SMEFT RGEs from the initial scale to `scale_out`. Returns a dictionary with parameters and Wilson coefficients. Much faster but less precise than `rgevolve`.
def app_profile(
    self,
    app_profile_id,
    routing_policy_type=None,
    description=None,
    cluster_id=None,
    allow_transactional_writes=None,
):
    """Factory to create an AppProfile associated with this instance.

    :param app_profile_id: the ID of the AppProfile
        (``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``).
    :param routing_policy_type: (Optional) routing policy type constant.
    :param description: (Optional) long-form description of the use case.
    :param cluster_id: (Optional) cluster id, only required when the
        routing policy is SINGLE.
    :param allow_transactional_writes: (Optional) allow transactional
        writes for SINGLE routing.
    :returns: AppProfile for this instance.
    """
    return AppProfile(
        app_profile_id,
        self,
        routing_policy_type=routing_policy_type,
        description=description,
        cluster_id=cluster_id,
        allow_transactional_writes=allow_transactional_writes,
    )
Factory to create AppProfile associated with this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type: routing_policy_type: int :param: routing_policy_type: The type of the routing policy. Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use case for this AppProfile. :type: cluster_id: str :param: cluster_id: (Optional) Unique cluster_id which is only required when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for ROUTING_POLICY_TYPE_SINGLE. :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` :returns: AppProfile for this instance.
def optimal_parameters(reconstruction, fom, phantoms, data, initial=None,
                       univariate=False):
    r"""Find the optimal parameters for a reconstruction method.

    Minimizes ``sum(fom(reconstruction(data_i, params), phantom_i))``
    over the parameters, with a scalar solver when *univariate* is True
    and Powell's method otherwise.

    Parameters
    ----------
    reconstruction : callable ``(data, parameters) -> image``
    fom : callable ``(reco_image, true_image) -> float``
        Figure of merit.
    phantoms : sequence
        True images.
    data : sequence
        The data to reconstruct from.
    initial : array-like or pair
        Initial guess: required array in the multivariate case, optional
        bracket pair in the univariate case.
    univariate : bool, optional
        Whether to use a univariate solver.

    Returns
    -------
    parameters : `numpy.ndarray`
    """
    def func(lam):
        return sum(fom(reconstruction(datai, lam), phantomi)
                   for phantomi, datai in zip(phantoms, data))
    # tolerance scaled to the phantom space's dtype resolution
    tol = np.finfo(phantoms[0].space.dtype).resolution * 10
    if univariate:
        result = scipy.optimize.minimize_scalar(
            func, bracket=initial, tol=tol, bounds=None,
            options={'disp': False})
        return result.x
    else:
        initial = np.asarray(initial)
        parameters = scipy.optimize.fmin_powell(
            func, initial, xtol=tol, ftol=tol, disp=False)
        return parameters
r"""Find the optimal parameters for a reconstruction method. Notes ----- For a forward operator :math:`A : X \to Y`, a reconstruction operator parametrized by :math:`\theta` is some operator :math:`R_\theta : Y \to X` such that .. math:: R_\theta(A(x)) \approx x. The optimal choice of :math:`\theta` is given by .. math:: \theta = \arg\min_\theta fom(R(A(x) + noise), x) where :math:`fom : X \times X \to \mathbb{R}` is a figure of merit. Parameters ---------- reconstruction : callable Function that takes two parameters: * data : The data to be reconstructed * parameters : Parameters of the reconstruction method The function should return the reconstructed image. fom : callable Function that takes two parameters: * reconstructed_image * true_image and returns a scalar figure of merit. phantoms : sequence True images. data : sequence The data to reconstruct from. initial : array-like or pair Initial guess for the parameters. It is - a required array in the multivariate case - an optional pair in the univariate case. univariate : bool, optional Whether to use a univariate solver Returns ------- parameters : 'numpy.ndarray' The optimal parameters for the reconstruction problem.
def identify(fn):
    """Return a (module name, qualified name) tuple used to match
    functions to their neighbors in their resident namespaces.

    NOTE(review): called below as ``self.identify(fn)`` although it
    takes no ``self`` -- presumably a staticmethod in the enclosing
    class; confirm against the full class definition.
    """
    return (
        fn.__globals__['__name__'],
        getattr(fn, '__qualname__', getattr(fn, '__name__', ''))
    )

def __init__(self, fn):
    """Wrap *fn*; when it takes no declared arguments, look up a cached
    'backup plan' overload by identity and configure immediately."""
    self.validate_function(fn)
    self.configured = False
    self.has_backup_plan = False
    if self.has_args():
        self.backup_plan = fn
    else:
        self.id = self.identify(fn)
        self.backup_plan = big.overload._cache.get(self.id, None)
        self.configure_with(fn)

def __call__(self, *args, **kwargs):
    """Invoke the configured function.

    On error: fall back to the backup plan if one exists; re-raise when
    already configured; otherwise treat the call as configuration and
    return self.
    """
    try:
        return self.fn(*args, **kwargs)
    except Exception as ex:
        if self.has_backup_plan:
            return self.backup_plan(*args, **kwargs)
        elif self.configured:
            raise ex
        else:
            self.configure_with(*args, **kwargs)
            return self
returns a tuple that is used to match functions to their neighbors in their resident namespaces
def ln_growth(eqdata, **kwargs):
    """Return the natural log of growth.

    The output column defaults to 'LnGrowth'.  See also :func:`growth`.
    """
    if 'outputcol' not in kwargs:
        kwargs['outputcol'] = 'LnGrowth'
    return np.log(growth(eqdata, **kwargs))
Return the natural log of growth. See also -------- :func:`growth`
def stop(self, id):
    """Stop the tracker with the given *id* via a DELETE request."""
    path = partial(_path, self.adapter)
    path = path(id)
    return self._delete(path)
stop the tracker.
def get_content_models(cls):
    """Return all registered subclasses of the concrete model,
    excluding the concrete model itself."""
    concrete_model = base_concrete_model(ContentTyped, cls)
    return [m for m in apps.get_models()
            if m is not concrete_model and issubclass(m, concrete_model)]
Return all subclasses of the concrete model.
def space_acl(args):
    """Retrieve the access control list for a workspace as a
    ``{user: access_level}`` dict, sorted by user."""
    r = fapi.get_workspace_acl(args.project, args.workspace)
    fapi._check_response_code(r, 200)
    result = dict()
    for user, info in sorted(r.json()['acl'].items()):
        result[user] = info['accessLevel']
    return result
Retrieve access control list for a workspace
def _safe_name(file_name, sep):
    """Convert *file_name* to normalized ASCII and replace whitespace
    with *sep*; returns None when nothing usable is left."""
    file_name = stringify(file_name)
    if file_name is None:
        return
    file_name = ascii_text(file_name)
    file_name = category_replace(file_name, UNICODE_CATEGORIES)
    file_name = collapse_spaces(file_name)
    if file_name is None or not len(file_name):
        return
    return file_name.replace(WS, sep)
Convert the file name to ASCII and normalize the string.
def get(self, label, default=None):
    """Return the value occupying *label*, or *default* when the label
    is not present (analogous to ``dict.get``).

    Parameters
    ----------
    label : object
        Label value looked for.
    default : object, optional
        Value to return if label not in index.
    """
    if label in self.index:
        loc = self.index.get_loc(label)
        return self._get_val_at(loc)
    else:
        return default
Returns value occupying requested label, default to specified missing value if not present. Analogous to dict.get Parameters ---------- label : object Label value looking for default : object, optional Value to return if label not in index Returns ------- y : scalar
def actions(obj, **kwargs):
    """Render the actions available for *obj* as an HTML div.

    ``exclude`` may be passed as a comma-separated string of action
    names to omit.
    """
    if 'exclude' in kwargs:
        kwargs['exclude'] = kwargs['exclude'].split(',')
    actions = obj.get_actions(**kwargs)
    if isinstance(actions, dict):
        actions = actions.values()
    buttons = "".join("%s" % action.render() for action in actions)
    return '<div class="actions">%s</div>' % buttons
Return actions available for an object
def action_draft(self):
    """Set a cancelled change request back to draft.

    Raises UserError unless every record is cancelled and the current
    user is an owner or approver.
    """
    for rec in self:
        if not rec.state == 'cancelled':
            raise UserError(
                _('You need to cancel it before reopening.'))
        if not (rec.am_i_owner or rec.am_i_approver):
            raise UserError(
                _('You are not authorized to do this.\r\n'
                  'Only owners or approvers can reopen Change Requests.'))
        rec.write({'state': 'draft'})
Set a change request as draft
def _prepare_corerelation_data(self, X1, X2, start_voxel=0,
                               num_processed_voxels=None):
    """Compute correlations between a slab of X1's voxels (starting at
    *start_voxel*) and all voxels of X2.

    X1, X2: lists of [num_TRs, num_voxels] arrays of equal length, with
    matching TR counts per element.  Returns corr_data of shape
    [len(X1), num_processed_voxels, num_voxels2].
    """
    num_samples = len(X1)
    assert num_samples > 0, \
        'at least one sample is needed for correlation computation'
    num_voxels1 = X1[0].shape[1]
    num_voxels2 = X2[0].shape[1]
    assert num_voxels1 * num_voxels2 == self.num_features_, \
        'the number of features provided by the input data ' \
        'does not match the number of features defined in the model'
    assert X1[0].shape[0] == X2[0].shape[0], \
        'the numbers of TRs of X1 and X2 are not identical'
    if num_processed_voxels is None:
        num_processed_voxels = num_voxels1
    corr_data = np.zeros((num_samples, num_processed_voxels, num_voxels2),
                         np.float32, order='C')
    for idx, data in enumerate(X1):
        data2 = X2[idx]
        num_TRs = data.shape[0]
        # C-level BLAS kernel writes correlations into corr_data in place
        blas.compute_corr_vectors('N', 'T', num_voxels2,
                                  num_processed_voxels, num_TRs,
                                  1.0, data2, num_voxels2,
                                  data, num_voxels1,
                                  0.0, corr_data, num_voxels2,
                                  start_voxel, idx)
    logger.debug(
        'correlation computation done'
    )
    return corr_data
Compute auto-correlation for the input data X1 and X2. it will generate the correlation between some voxels and all voxels Parameters ---------- X1: a list of numpy array in shape [num_TRs, num_voxels1] X1 contains the activity data filtered by ROIs and prepared for correlation computation. All elements of X1 must have the same num_voxels value. X2: a list of numpy array in shape [num_TRs, num_voxels2] len(X1) equals len(X2). All elements of X2 must have the same num_voxels value. X2 can be identical to X1; if not, X1 must have more voxels than X2 (guaranteed by self.fit and/or self.predict). start_voxel: int, default 0 the starting voxel id for correlation computation num_processed_voxels: int, default None the number of voxels it computes for correlation computation if it is None, it is set to self.num_voxels Returns ------- corr_data: the correlation data in shape [len(X), num_processed_voxels, num_voxels2]
def render(self):
    """Render the viewlet.

    Empty output unless the viewlet is available, the current user is a
    LabManager or Manager, and there are failed instruments.
    """
    if not self.available():
        return ""
    mtool = api.get_tool("portal_membership")
    member = mtool.getAuthenticatedMember()
    roles = member.getRoles()
    allowed = "LabManager" in roles or "Manager" in roles
    # populates self.nr_failed as a side effect
    self.get_failed_instruments()
    if allowed and self.nr_failed:
        return self.index()
    else:
        return ""
Render the viewlet
def get_owner_ids_value(self, obj):
    """Return the primary keys of users holding the 'owner' permission
    on *obj*."""
    return [
        user.pk for user in
        get_users_with_permission(obj, get_full_perm('owner', obj))
    ]
Extract owners' ids.
def gray2bin(G):
    """Convert a gray-coded vector into a binary-coded vector."""
    # each binary bit is the XOR of the gray bits from position i upward
    return farray([G[i:].uxor() for i, _ in enumerate(G)])
Convert a gray-coded vector into a binary-coded vector.
def status(cls):
    """Retrieve global status from status.gandi.net."""
    return cls.json_get('%s/status' % cls.api_url, empty_key=True,
                        send_key=False)
Retrieve global status from status.gandi.net.
def dirty(self):
    """True if the cache needs updating: the cache file is missing or
    older than the source file."""
    return not os.path.exists(self.cachename) or \
        (os.path.getmtime(self.filename) > os.path.getmtime(self.cachename))
True if the cache needs to be updated, False otherwise
def get_my_ip():
    """Return this computer's IP address as a string.

    NOTE: runs GET_IP_CMD through the shell (shell=True); the command
    string must be trusted.
    """
    # [:-1] drops the trailing newline; strip() removes remaining space
    ip = subprocess.check_output(GET_IP_CMD, shell=True).decode('utf-8')[:-1]
    return ip.strip()
Returns this computer's IP address as a string.
def sum(self):
    """Compute the sum across records, keeping base axes as size-1
    dimensions."""
    return self._constructor(
        self.values.sum(axis=self.baseaxes, keepdims=True))
Compute the sum across records.
def classify_catalog(catalog):
    """Split *catalog* by source class.

    Returns (components, islands, simples): lists of OutputSource,
    IslandSource and SimpleSource respectively.  Objects of any other
    type are silently ignored.
    """
    components = []
    islands = []
    simples = []
    for source in catalog:
        if isinstance(source, OutputSource):
            components.append(source)
        elif isinstance(source, IslandSource):
            islands.append(source)
        elif isinstance(source, SimpleSource):
            simples.append(source)
    return components, islands, simples
Look at a list of sources and split them according to their class. Parameters ---------- catalog : iterable A list or iterable object of {SimpleSource, IslandSource, OutputSource} objects, possibly mixed. Any other objects will be silently ignored. Returns ------- components : list List of sources of type OutputSource islands : list List of sources of type IslandSource simples : list List of source of type SimpleSource
def wrap_io_os_err(e):
    """Format an IO/OS error message for wrapping in FSQExceptions.

    NOTE(review): ``e.message`` exists only on Python 2 exceptions --
    confirm this module still targets Python 2.
    """
    msg = ''
    if e.strerror:
        msg = e.strerror
    if e.message:
        msg = ' '.join([e.message, msg])
    if e.filename:
        msg = ': '.join([msg, e.filename])
    return msg
Formats IO and OS error messages for wrapping in FSQExceptions
def get_userinfo(self):
    """Return the current user's name, mobile, orgEmail, position and
    avatar as a dict (missing keys map to None)."""
    fields = ("name", "mobile", "orgEmail", "position", "avatar")
    return {field: self.json_response.get(field, None) for field in fields}
Method to get current user's name, mobile, email and position.
def _format_postconditions(postconditions: List[icontract._Contract],
                           prefix: Optional[str] = None) -> List[str]:
    """Format postconditions as reST.

    :param postconditions: postconditions of a function
    :param prefix: prefix to prepend to the ``:ensures:`` directive
    :return: list of lines describing the postconditions (empty when
        there are none)
    """
    if not postconditions:
        return []
    result = []
    if prefix is not None:
        result.append(":{} ensures:".format(prefix))
    else:
        result.append(":ensures:")
    for postcondition in postconditions:
        result.append(" * {}".format(_format_contract(contract=postcondition)))
    return result
Format postconditions as reST. :param postconditions: postconditions of a function :param prefix: prefix to be prepended to ``:ensures:`` directive :return: list of lines describing the postconditions
def register_instance(self, instance, allow_dotted_names=False):
    """Register *instance* to respond to XML-RPC requests.

    Only one instance can be installed at a time.  If the instance has a
    ``_dispatch`` method it is called with the method name and params;
    otherwise the instance is searched for a matching public method
    (names starting with '_' are considered private).

    SECURITY WARNING: enabling *allow_dotted_names* lets intruders
    access your module's global variables and may allow arbitrary code
    execution; only use it on a secure, closed network.
    """
    self.instance = instance
    self.allow_dotted_names = allow_dotted_names
Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network.
def _get_url_hashes(path):
    """Return ``{sha1_hex: True}`` for each url listed in the file at
    *path*."""
    urls = _read_text_file(path)

    def url_hash(u):
        h = hashlib.sha1()
        try:
            u = u.encode('utf-8')
        except UnicodeDecodeError:
            # NOTE(review): Python 2 remnant -- .encode on a py2 str can
            # raise this; `u` is then hashed un-encoded, which would
            # fail on Python 3.  Confirm intent.
            logging.error('Cannot hash url: %s', u)
        h.update(u)
        return h.hexdigest()
    return {url_hash(u): True for u in urls}
Get hashes of urls in file.
def merge_paths(paths, weights=None):
    """Zip together sorted lists into one graph.

    Builds a graph from *paths* (optionally weighted) and reduces it to
    the merged ordering.
    """
    G = make_paths(paths, weights=weights)
    G = reduce_paths(G)
    return G
Zip together sorted lists. >>> paths = [[1, 2, 3], [1, 3, 4], [2, 4, 5]] >>> G = merge_paths(paths) >>> nx.topological_sort(G) [1, 2, 3, 4, 5] >>> paths = [[1, 2, 3, 4], [1, 2, 3, 2, 4]] >>> G = merge_paths(paths, weights=(1, 2)) >>> nx.topological_sort(G) [1, 2, 3, 4]
def _calc_new_threshold(self, score): if self.threshold_mode == 'rel': abs_threshold_change = self.threshold * score else: abs_threshold_change = self.threshold if self.lower_is_better: new_threshold = score - abs_threshold_change else: new_threshold = score + abs_threshold_change return new_threshold
Determine threshold based on score.
def locked(path, timeout=None):
    """Decorator serializing calls to the wrapped function via a
    FileLock.

    Arguments:
    - path: path for the lockfile.
    - timeout (optional): timeout for acquiring the lock.
    """
    def decor(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(path, timeout=timeout)
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                # always release, even when func raises
                lock.release()
        return wrapper
    return decor
Decorator which enables locks for decorated function. Arguments: - path: path for lockfile. - timeout (optional): Timeout for acquiring lock. Usage: @locked('/var/run/myname', timeout=0) def myname(...): ...
def atlasdb_reset_zonefile_tried_storage( con=None, path=None ):
    """Clear the tried_storage flag for zonefiles we don't have
    (present = 0), so fetching them from storage is re-attempted."""
    with AtlasDBOpen(con=con, path=path) as dbcon:
        sql = "UPDATE zonefiles SET tried_storage = ? WHERE present = ?;"
        args = (0, 0)
        cur = dbcon.cursor()
        res = atlasdb_query_execute( cur, sql, args )
        dbcon.commit()
        return True
For zonefiles that we don't have, re-attempt to fetch them from storage.
def Terminate(self):
    """Close all open connections.

    Loops through every pooled connection bucket, closing each
    connection (best-effort; close errors are swallowed) and clears the
    pool.  Call this at the end of your application.
    """
    self.lock.acquire()
    try:
        for bucket in self.connections.values():
            try:
                for conn in bucket:
                    conn.lock()
                    try:
                        conn.Close()
                    except Exception:
                        pass
                    conn.release()
            except Exception:
                pass
        self.connections = {}
    finally:
        self.lock.release()
Close all open connections Loop though all the connections and commit all queries and close all the connections. This should be called at the end of your application. @author: Nick Verbeck @since: 5/12/2008
def end_namespace(self, prefix):
    """Undeclare the namespace *prefix* and end its mapping on the
    underlying generator."""
    self._ns.pop(prefix)
    self._g.endPrefixMapping(prefix)
Undeclare a namespace prefix.
def place(self, value):
    """Set the place of articulation of the consonant.

    :param str value: the value to be set (must be None or in DG_C_PLACE)
    :raises ValueError: on an unrecognized value
    """
    if (value is not None) and (not value in DG_C_PLACE):
        raise ValueError("Unrecognized value for place: '%s'" % value)
    self.__place = value
Set the place of articulation of the consonant. :param str value: the value to be set
def set_handler(self, language, obj):
    """Define a custom language handler for RiveScript objects.

    Passing None for *obj* deletes any existing handler for *language*;
    otherwise *obj* is installed as the handler.
    """
    if obj is not None:
        self._handlers[language] = obj
    elif language in self._handlers:
        del self._handlers[language]
Define a custom language handler for RiveScript objects. Pass in a ``None`` value for the object to delete an existing handler (for example, to prevent Python code from being able to be run by default). Look in the ``eg`` folder of the rivescript-python distribution for an example script that sets up a JavaScript language handler. :param str language: The lowercased name of the programming language. Examples: python, javascript, perl :param class obj: An instance of an implementation class object. It should provide the following interface:: class MyObjectHandler: def __init__(self): pass def load(self, name, code): # name = the name of the object from the RiveScript code # code = the source code of the object def call(self, rs, name, fields): # rs = the current RiveScript interpreter object # name = the name of the object being called # fields = array of arguments passed to the object return reply
def logged_command(cmds):
    """Helper: log a command line, then run it through the shell."""
    command = ' '.join(cmds)
    logger.info(command)
    os.system(command)
helper function to log a command and then run it
def extras_msg(extras):
    """Create an error message for extra items or properties.

    Returns (comma-joined reprs, verb) where the verb agrees in number
    with the item count.
    """
    verb = "was" if len(extras) == 1 else "were"
    joined = ", ".join(repr(extra) for extra in extras)
    return joined, verb
Create an error message for extra items or properties.
def accept_key(pki_dir, pub, id_):
    """Place *pub* in the accepted-minions key dir under *pki_dir*.

    Creates the minion key directories when missing, writes the public
    key to ``minions/<id_>``, and removes a matching key from
    ``minions_pre`` (the unaccepted dir) if its contents equal *pub*.
    """
    for key_dir in 'minions', 'minions_pre', 'minions_rejected':
        key_path = os.path.join(pki_dir, key_dir)
        if not os.path.exists(key_path):
            os.makedirs(key_path)
    key = os.path.join(pki_dir, 'minions', id_)
    with salt.utils.files.fopen(key, 'w+') as fp_:
        fp_.write(salt.utils.stringutils.to_str(pub))
    oldkey = os.path.join(pki_dir, 'minions_pre', id_)
    if os.path.isfile(oldkey):
        with salt.utils.files.fopen(oldkey) as fp_:
            # only drop the pending key when it matches what we accepted
            if fp_.read() == pub:
                os.remove(oldkey)
If the master config was available then we will have a pki_dir key in the opts directory, this method places the pub key in the accepted keys dir and removes it from the unaccepted keys dir if that is the case.
def save(self, session_file, verbose=False):
    """Save the current session to an existing file, replacing it.

    :param session_file: path to the file the session is saved to
    :param verbose: print more
    """
    PARAMS = {"file": session_file}
    response = api(url=self.__url + "/save", PARAMS=PARAMS, verbose=verbose)
    return response
Saves the current session to an existing file, which will be replaced. If this is a new session that has not been saved yet, use 'save as' instead. :param session_file: The path to the file where the current session must be saved to. :param verbose: print more
def list_sku_versions(access_token, subscription_id, location, publisher,
                      offer, sku):
    """List available versions for a given publisher's VM image sku.

    Args:
        access_token (str): valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        location (str): Azure data center location, e.g. westus.
        publisher (str): VM image publisher.
        offer (str): VM image offer.
        sku (str): VM image sku.

    Returns:
        HTTP response with a JSON list of versions.
    """
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Compute/',
                        'locations/', location,
                        '/publishers/', publisher,
                        '/artifacttypes/vmimage/offers/', offer,
                        '/skus/', sku,
                        '/versions?api-version=', COMP_API])
    return do_get(endpoint, access_token)
List available versions for a given publisher's sku. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. location (str): Azure data center location. E.g. westus. publisher (str): VM image publisher. E.g. MicrosoftWindowsServer. offer (str): VM image offer. E.g. WindowsServer. sku (str): VM image sku. E.g. 2016-Datacenter. Returns: HTTP response with JSON list of versions.
def namespace_to_dict(obj):
    """Return ``vars(obj)`` when *obj* is an argparse.Namespace or
    optparse.Values; otherwise return *obj* unchanged.

    Redefine this method if using other parsers.
    """
    is_parser_ns = isinstance(obj, (argparse.Namespace, optparse.Values))
    return vars(obj) if is_parser_ns else obj
If obj is argparse.Namespace or optparse.Values we'll return a dict representation of it, else return the original object. Redefine this method if using other parsers. :param obj: * :return: :rtype: dict or *
def add_barplot(self):
    """Generate the Samblaster duplicate-reads bar plot section."""
    cats = OrderedDict()
    cats['n_nondups'] = {'name': 'Non-duplicates'}
    cats['n_dups'] = {'name': 'Duplicates'}
    pconfig = {
        'id': 'samblaster_duplicates',
        'title': 'Samblaster: Number of duplicate reads',
        'ylab': 'Number of reads'
    }
    self.add_section(
        plot = bargraph.plot(self.samblaster_data, cats, pconfig)
    )
Generate the Samblaster bar plot.
def space_labels(document):
    """Ensure whitespace around bold compound labels such as ``(1a)``."""
    for label in document.xpath('.//bold'):
        # only touch bold elements whose whole text is a label like (L12a):
        if not label.text or not re.match('^\(L?\d\d?[a-z]?\):?$',
                                          label.text, re.I):
            continue
        parent = label.getparent()
        previous = label.getprevious()
        if previous is None:
            # label is the first child: pad the parent's leading text
            text = parent.text or ''
            if not text.endswith(' '):
                parent.text = text + ' '
        else:
            text = previous.tail or ''
            if not text.endswith(' '):
                previous.tail = text + ' '
        # pad the text following the label too
        text = label.tail or ''
        if not text.endswith(' '):
            label.tail = text + ' '
    return document
Ensure space around bold compound labels.
def field_types(self):
    """Access the field_types (lazily constructed).

    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    """
    if self._field_types is None:
        self._field_types = FieldTypeList(
            self._version, assistant_sid=self._solution['sid'], )
    return self._field_types
Access the field_types :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
def get_fields(model_class):
    """Return the attribute names of *model_class* that are mongoengine
    fields.

    Returns:
        list of strings of field attribute names.
    """
    return [
        attr for attr, value in model_class.__dict__.items()
        if issubclass(type(value),
                      (mongo.base.BaseField, mongo.EmbeddedDocumentField))
    ]
Pass in a mongo model class and extract all the attributes which are mongoengine fields Returns: list of strings of field attributes
def visit(self, node):
    """See the ``NodeVisitor`` visit method, except children are
    traversed in reversed order before dispatching."""
    method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
    try:
        return method(node, [self.visit(child)
                             for child in reversed(list(node))])
    except (VisitationError, UndefinedLabel):
        # already wrapped; propagate unchanged
        raise
    except self.unwrapped_exceptions:
        raise
    except Exception:
        # wrap any other error with node context (Python 2-style reraise)
        exc_class, exc, traceback = exc_info()
        reraise(VisitationError, VisitationError(exc, exc_class, node),
                traceback)
See the ``NodeVisitor`` visit method. This just changes the order in which we visit nonterminals from right to left to left to right.
def on_ok(self, sender):
    """Callback invoked when one task reaches status S_OK.

    When the ion task finishes, its final structure seeds the ioncell
    task, which is then unlocked for submission.  When the ioncell task
    finishes and a target dilatmx is set, dilatmx is reduced and the
    task restarted until the target is reached.
    """
    logger.debug("in on_ok with sender %s" % sender)
    if sender == self.ion_task and not self.transfer_done:
        ion_structure = self.ion_task.get_final_structure()
        self.ioncell_task._change_structure(ion_structure)
        self.transfer_done = True
        self.ioncell_task.unlock(source_node=self)
    elif sender == self.ioncell_task and self.target_dilatmx:
        actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
        if self.target_dilatmx < actual_dilatmx:
            self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
            self.history.info(
                'Converging dilatmx. Value reduce from {} to {}.'
                .format(actual_dilatmx,
                        self.ioncell_task.get_inpvar('dilatmx')))
            self.ioncell_task.reset_from_scratch()
    return super().on_ok(sender)
This callback is called when one task reaches status S_OK. If sender == self.ion_task, we update the initial structure used by self.ioncell_task and we unlock it so that the job can be submitted.
def compile_delete(self, query):
    """Compile a DELETE statement into SQL.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder

    :return: The compiled DELETE statement
    :rtype: str
    """
    table = self.wrap_table(query.from__)

    # Only a real list of where clauses gets compiled; anything else
    # means "no WHERE part".
    if isinstance(query.wheres, list):
        where_sql = self._compile_wheres(query)
    else:
        where_sql = ""

    if query.joins:
        # Multi-table form: DELETE <t> FROM <t> <joins> <wheres>
        join_sql = " %s" % self._compile_joins(query, query.joins)
        statement = ("DELETE %s FROM %s%s %s" % (table, table, join_sql, where_sql)).strip()
    else:
        statement = ("DELETE FROM %s %s" % (table, where_sql)).strip()

    # Optional trailing clauses, space-joined onto the statement.
    pieces = [statement]
    if query.orders:
        pieces.append(self._compile_orders(query, query.orders))
    if query.limit_:
        pieces.append(self._compile_limit(query, query.limit_))
    return " ".join(pieces)
Compile a delete statement into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :return: The compiled delete statement :rtype: str
def dir_import_table(self):
    """Yield entries of the import directory table.

    The import table is terminated by an all-null entry, so entries are
    produced until such a sentinel is encountered.
    """
    import_header = list(self.optional_data_directories)[1]
    offset = self.resolve_rva(import_header.VirtualAddress)
    while True:
        entry = Import_DirectoryTable(self.stream, offset, self)
        if entry.is_empty():
            # All-null sentinel: end of the table.
            break
        yield entry
        offset += Import_DirectoryTable.get_size()
The import table is terminated by an all-null entry, so we have to check for that.
def set_contents_from_filename(self, filename, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False,
                               encrypt_key=False):
    """Store an object in S3 using the name of the Key object as the key
    in S3 and the contents of the file named by *filename*.

    See set_contents_from_file for full details about the parameters.

    :type filename: string
    :param filename: The name of the file to upload to S3.
    :param headers: Additional headers to pass along with the request.
    :param replace: If True, replace the contents if the key exists.
    :param cb: Optional progress callback ``cb(transmitted, total)``.
    :param num_cb: Maximum number of times ``cb`` will be invoked.
    :param policy: A canned ACL policy applied to the new key.
    :param md5: Precomputed (hexdigest, base64) MD5 pair, if available.
    :param reduced_redundancy: If True, use REDUCED_REDUNDANCY storage.
    :param encrypt_key: If True, request server-side encryption.
    """
    # Use a context manager so the file handle is released even if the
    # upload raises; the previous code leaked the handle on error.
    with open(filename, 'rb') as fp:
        self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                    policy, md5, reduced_redundancy,
                                    encrypt_key=encrypt_key)
Store an object in S3 using the name of the Key object as the key in S3 and the contents of the file named by 'filename'. See set_contents_from_file method for details about the parameters. :type filename: string :param filename: The name of the file that you want to put onto S3 :type headers: dict :param headers: Additional headers to pass along with the request to AWS. :type replace: bool :param replace: If True, replaces the contents of the file if it already exists. :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: A canned ACL policy that will be applied to the new key in S3. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: If you need to compute the MD5 for any reason prior to upload, it's silly to have to do it twice so this param, if present, will be used as the MD5 values of the file. Otherwise, the checksum will be computed. :type reduced_redundancy: bool :param reduced_redundancy: If True, this will set the storage class of the new Key to be REDUCED_REDUNDANCY. The Reduced Redundancy Storage (RRS) feature of S3, provides lower redundancy at lower storage cost. 
:type encrypt_key: bool :param encrypt_key: If True, the new copy of the object will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3.
def link_href(self, rel):
    """Look for a link with the specified rel; return its href, or None."""
    found = self.link(rel)
    return None if found is None else found['href']
Look for link with specified rel, return href from it or None.
def to_frequencyseries(self, delta_f=None):
    """Return the Fourier transform of this time series.

    Parameters
    ----------
    delta_f : {None, float}, optional
        The frequency resolution of the returned frequency series. By
        default the resolution is determined by the duration of the
        timeseries.

    Returns
    -------
    FrequencySeries:
        The fourier transform of this time series.
    """
    from pycbc.fft import fft
    if not delta_f:
        delta_f = 1.0 / self.duration

    # Length of the (possibly zero-padded) time series needed for the
    # requested resolution, and of its one-sided transform.
    tlen = int(1.0 / delta_f / self.delta_t + 0.5)
    flen = int(tlen / 2 + 1)

    if tlen < len(self):
        # Padding shorter than the data would undersample the spectrum.
        raise ValueError("The value of delta_f (%s) would be "
                         "undersampled. Maximum delta_f "
                         "is %s." % (delta_f, 1.0 / self.duration))
    # NOTE(review): delta_f was already defaulted above, so it is always
    # truthy here and the `tmp = self` branch appears unreachable -- the
    # series is always copied into a zero-padded buffer. Confirm whether
    # the intent was to skip the copy when delta_f was not supplied.
    if not delta_f:
        tmp = self
    else:
        tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
                         delta_t=self.delta_t, epoch=self.start_time)
        tmp[:len(self)] = self[:]
    f = FrequencySeries(zeros(flen, dtype=complex_same_precision_as(self)),
                        delta_f=delta_f)
    fft(tmp, f)
    return f
Return the Fourier transform of this time series Parameters ---------- delta_f : {None, float}, optional The frequency resolution of the returned frequency series. By default the resolution is determined by the duration of the timeseries. Returns ------- FrequencySeries: The fourier transform of this time series.
def fw_retry_failures_delete(self):
    """Retry firewall delete operations that previously failed.

    Walks every tenant known to this instance and, for tenants whose FW
    result is still in the delete-init state, re-runs the fabric/device
    delete path.
    """
    for tenant_id in self.fwid_attr:
        try:
            # Serialize with other operations on the same tenant.
            with self.fwid_attr[tenant_id].mutex_lock:
                fw_data = self.get_fw_by_tenant_id(tenant_id)
                if fw_data is None:
                    LOG.info("No FW for tenant %s", tenant_id)
                    continue
                # 'result' may carry extra detail in parentheses; keep
                # only the bare status token for comparison.
                result = fw_data.get('result').split('(')[0]
                if result == fw_constants.RESULT_FW_DELETE_INIT:
                    fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
                    if not fw_dict:
                        # Fall back to reconstructing the dict from the
                        # DB record when the in-memory copy is empty.
                        fw_dict = self.fill_fw_dict_from_db(fw_data)
                    self.retry_failure_fab_dev_delete(tenant_id, fw_data,
                                                     fw_dict)
        except Exception as exc:
            # Best-effort loop: log and continue with the next tenant.
            LOG.error("Exception in retry failure delete %s", str(exc))
This routine is called for retrying the delete cases.
def _find_plugin_dir(module_type):
    """Find the directory containing the plugin definition for *module_type*.

    Searches every plugin install path for a subdirectory named after the
    module type; raises PluginCandidateError when none exists.
    """
    for base_dir in _get_plugin_install_dirs():
        candidate = os.path.join(base_dir, module_type)
        if os.path.isdir(candidate):
            return candidate
    # Nothing matched in any install dir.
    raise PluginCandidateError(
        'No plugin found for `{}` module in paths:\n{}'.format(
            module_type, '\n'.join(_get_plugin_install_dirs())))
Find the directory containing the plugin definition for the given type. Do this by searching all the paths where plugins can live for a dir that matches the type name.
def subs2seqs(self) -> Dict[str, List[str]]:
    """Return a defaultdict mapping 'node' to the sequence names declared
    in the XML `sequences` element (empty when no such element exists).
    """
    mapping = collections.defaultdict(list)
    sequence_nodes = find(self.find('sequences'), 'node')
    if sequence_nodes is None:
        return mapping
    for element in sequence_nodes:
        mapping['node'].append(strip(element.tag))
    return mapping
A |collections.defaultdict| containing the node-specific information provided by XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> subs2seqs = series_io.writers[2].subs2seqs >>> for subs, seq in sorted(subs2seqs.items()): ... print(subs, seq) node ['sim', 'obs']
def main(reactor):
    """Close all open streams and circuits in the Tor we connect to.

    Connects over the default UNIX control socket, walks every circuit,
    closes each of its streams and then the circuit itself, and finally
    asks Tor to quit.
    """
    control_ep = UNIXClientEndpoint(reactor, '/var/run/tor/control')
    # NOTE(review): the bare `yield`s imply this runs under twisted's
    # @inlineCallbacks (decorator not visible in this chunk) -- confirm.
    tor = yield txtorcon.connect(reactor, control_ep)
    state = yield tor.create_state()
    print("Closing all circuits:")
    # Snapshot the circuits since closing them mutates the mapping.
    for circuit in list(state.circuits.values()):
        path = '->'.join(map(lambda r: r.id_hex, circuit.path))
        print("Circuit {} through {}".format(circuit.id, path))
        for stream in circuit.streams:
            print(" Stream {} to {}".format(stream.id, stream.target_host))
            yield stream.close()
            print(" closed")
        yield circuit.close()
        print("closed")
    yield tor.quit()
Close all open streams and circuits in the Tor we connect to
def result_type(*arrays_and_dtypes):
    """Like np.result_type, but with type promotion rules matching pandas.

    Examples of changed behavior:
        number + string -> object (not string)
        bytes + unicode -> object (not unicode)

    Parameters
    ----------
    *arrays_and_dtypes : list of arrays and dtypes
        The dtype is extracted from both numpy and dask arrays.

    Returns
    -------
    numpy.dtype for the result.
    """
    scalar_types = {np.result_type(item).type for item in arrays_and_dtypes}

    def _present(candidate):
        return any(issubclass(t, candidate) for t in scalar_types)

    # When the inputs mix the two sides of any promote-to-object pair
    # (e.g. numbers with strings), fall back to object dtype.
    for left, right in PROMOTE_TO_OBJECT:
        if _present(left) and _present(right):
            return np.dtype(object)
    return np.result_type(*arrays_and_dtypes)
Like np.result_type, but with type promotion rules matching pandas. Examples of changed behavior: number + string -> object (not string) bytes + unicode -> object (not unicode) Parameters ---------- *arrays_and_dtypes : list of arrays and dtypes The dtype is extracted from both numpy and dask arrays. Returns ------- numpy.dtype for the result.
def mouseDown(self, button):
    """Press mouse *button* (int, 1-n) at the last set pointer position."""
    log.debug('mouseDown %s', button)
    # Set the bit for this button in the mask (button 1 -> bit 0).
    mask = self.buttons | (1 << (button - 1))
    self.buttons = mask
    self.pointerEvent(self.x, self.y, buttonmask=mask)
    return self
Send a mouse button down at the last set position button: int: [1-n]
def backward(self, loss):
    """Run backward propagation on *loss*, applying the loss scale."""
    with mx.autograd.record():
        scale = self._scaler.loss_scale
        # A sequence of losses is scaled element-wise; a single loss
        # is scaled directly.
        if isinstance(loss, (tuple, list)):
            scaled = [single_loss * scale for single_loss in loss]
        else:
            scaled = loss * scale
        mx.autograd.backward(scaled)
backward propagation with loss
def isSquare(matrix):
    """Check that ``matrix`` is square.

    Returns
    =======
    is_square : bool
        ``True`` if ``matrix`` is square, ``False`` otherwise.
    """
    try:
        try:
            rows, cols = matrix.shape
        except AttributeError:
            # Not array-like with a .shape; coerce through numpy.
            rows, cols = _np.array(matrix).shape
    except ValueError:
        # Shape is not two-dimensional (unpacking failed).
        return False
    return bool(rows == cols)
Check that ``matrix`` is square. Returns ======= is_square : bool ``True`` if ``matrix`` is square, ``False`` otherwise.
def write(self, data, params=None, expected_response_code=204,
          protocol='json'):
    """Write data to InfluxDB.

    :param data: the data to be written; a dict for the 'json' protocol,
        or a line-protocol string / sequence of strings for 'line'
    :param params: additional parameters for the request, defaults to None
    :type params: dict
    :param expected_response_code: expected HTTP status, defaults to 204
    :type expected_response_code: int
    :param protocol: protocol of input data, either 'json' or 'line'
    :type protocol: str
    :returns: True, if the write operation is successful
    :rtype: bool
    """
    # Note: this intentionally sets Content-Type on the shared headers
    # dict, matching the behavior callers observe today.
    headers = self._headers
    headers['Content-Type'] = 'application/octet-stream'

    precision = params.get('precision') if params else None

    payload = data
    if protocol == 'json':
        payload = make_lines(data, precision).encode('utf-8')
    elif protocol == 'line':
        lines = [data] if isinstance(data, str) else data
        payload = ('\n'.join(lines) + '\n').encode('utf-8')

    self.request(
        url="write",
        method='POST',
        params=params,
        data=payload,
        expected_response_code=expected_response_code,
        headers=headers,
    )
    return True
Write data to InfluxDB. :param data: the data to be written :type data: (if protocol is 'json') dict (if protocol is 'line') sequence of line protocol strings or single string :param params: additional parameters for the request, defaults to None :type params: dict :param expected_response_code: the expected response code of the write operation, defaults to 204 :type expected_response_code: int :param protocol: protocol of input data, either 'json' or 'line' :type protocol: str :returns: True, if the write operation is successful :rtype: bool
def GetUnscannedSubNode(self):
    """Retrieve the first unscanned sub node.

    Returns:
        SourceScanNode: sub scan node, or None if not available.
    """
    # An unscanned leaf is itself the answer.
    if not self.scanned and not self.sub_nodes:
        return self
    # Otherwise search depth-first through the children.
    for child in self.sub_nodes:
        found = child.GetUnscannedSubNode()
        if found:
            return found
    return None
Retrieves the first unscanned sub node. Returns: SourceScanNode: sub scan node or None if not available.
def addAnnotationsSearchOptions(parser):
    """Add the common annotation-search options to a command line parser."""
    option_adders = (
        addAnnotationSetIdArgument,
        addReferenceNameArgument,
        addReferenceIdArgument,
        addStartArgument,
        addEndArgument,
        addEffectsArgument,
        addPageSizeArgument,
    )
    for add_option in option_adders:
        add_option(parser)
Adds common options to a annotation searches command line parser.
def guess_pygments_highlighter(filename):
    """Factory producing a syntax highlighter class for *filename*.

    Falls back to TextSH when Pygments is unavailable or has no suitable
    lexer. Known custom extensions are mapped to named lexers first.
    """
    try:
        from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
    except Exception:
        # Pygments is an optional dependency.
        return TextSH

    _, extension = os.path.splitext(filename)
    try:
        if extension in custom_extension_lexer_mapping:
            lexer = get_lexer_by_name(custom_extension_lexer_mapping[extension])
        else:
            lexer = get_lexer_for_filename(filename)
    except Exception:
        # No lexer could be resolved for this file.
        return TextSH

    class GuessedPygmentsSH(PygmentsSH):
        # Bind the guessed lexer into a concrete highlighter subclass.
        _lexer = lexer

    return GuessedPygmentsSH
Factory to generate syntax highlighter for the given filename. If a syntax highlighter is not available for a particular file, this function will attempt to generate one based on the lexers in Pygments. If Pygments is not available or does not have an appropriate lexer, TextSH will be returned instead.
def close_cursor(self, cursor_id, address=None):
    """DEPRECATED - schedule a kill-cursors message for *cursor_id*.

    Raises TypeError if *cursor_id* is not an integer. *address* is the
    optional (host, port) pair of the cursor's server.
    """
    warnings.warn("close_cursor is deprecated.", DeprecationWarning,
                  stacklevel=2)
    if isinstance(cursor_id, integer_types):
        self._close_cursor(cursor_id, address)
    else:
        raise TypeError("cursor_id must be an instance of (int, long)")
DEPRECATED - Send a kill cursors message soon with the given id. Raises :class:`TypeError` if `cursor_id` is not an instance of ``(int, long)``. What closing the cursor actually means depends on this client's cursor manager. This method may be called from a :class:`~pymongo.cursor.Cursor` destructor during garbage collection, so it isn't safe to take a lock or do network I/O. Instead, we schedule the cursor to be closed soon on a background thread. :Parameters: - `cursor_id`: id of cursor to close - `address` (optional): (host, port) pair of the cursor's server. If it is not provided, the client attempts to close the cursor on the primary or standalone, or a mongos server. .. versionchanged:: 3.7 Deprecated. .. versionchanged:: 3.0 Added ``address`` parameter.
def analyse(self, path_and_filename, pattern):
    """Count lines of code and lines of comments in a file.

    Args:
        path_and_filename (str): path and filename to analyse.
        pattern (str): regex matching line comments and block comments.

    Returns:
        int, int: loc and com for the given file (loc excludes comments).
    """
    with open(path_and_filename) as handle:
        content = handle.read()

    total_lines = content.count('\n') + 1
    # Each comment match contributes one line per newline it spans.
    comment_lines = sum(
        match.count('\n') + 1
        for match in re.findall(pattern, content, re.DOTALL)
    )
    return max(0, total_lines - comment_lines), comment_lines
Find out lines of code and lines of comments. Args: path_and_filename (str): path and filename to parse for loc and com. pattern (str): regex to search for line commens and block comments Returns: int, int: loc and com for given file.
def fist() -> Histogram1D:
    """Return a simple histogram in the shape of a fist."""
    import numpy as np
    from ..histogram1d import Histogram1D
    # Cumulative bin widths sketch the finger/thumb boundaries.
    bin_widths = [0, 1.2, 0.2, 1, 0.1, 1, 0.1, 0.9, 0.1, 0.8]
    edges = np.cumsum(bin_widths)
    # Finger heights, lifted by a common base offset.
    heights = np.asarray([4, 1, 7.5, 6, 7.6, 6, 7.5, 6, 7.2]) + 5
    return Histogram1D(edges, heights,
                       axis_name="Is this a fist?",
                       title="Physt \"logo\"")
A simple histogram in the shape of a fist.
def get_prefix(self, form, name):
    """Return the prefix that is used for the formset's *name* field."""
    # Prepend the form's own prefix (with a dash) only when it is set.
    form_part = '%s-' % form.prefix if form.prefix else ''
    return '%s%s-%s' % (form_part, self.prefix_name, name)
Return the prefix that is used for the formset.
def connection(self, connection):
    """Change the dynamo connection.

    Subscribes to capacity data on the new connection, unsubscribes the
    previous connection (if any), and resets per-connection caches.
    """
    if connection is not None:
        connection.subscribe("capacity", self._on_capacity_data)
        connection.default_return_capacity = True
    if self._connection is not None:
        # BUG FIX: unsubscribe the *old* connection. The original code
        # called unsubscribe on the new connection, which left the
        # previous connection subscribed and crashed with AttributeError
        # when the new connection was None.
        self._connection.unsubscribe("capacity", self._on_capacity_data)
    self._connection = connection
    # Per-connection caches are invalid for the new connection.
    self._cloudwatch_connection = None
    self.cached_descriptions = {}
Change the dynamo connection
def parse(input_: Union[str, FileStream], source: str) -> Optional[str]:
    """Parse JSG text and return the generated Python source.

    :param input_: string or ANTLR stream to parse
    :param source: source name recorded in the python file header
    :return: python text if parsing succeeded, otherwise None
    """
    error_listener = ParseErrorListener()
    if not isinstance(input_, FileStream):
        input_ = InputStream(input_)

    # Lexing phase -- abort early on lexical errors.
    lexer = jsgLexer(input_)
    lexer.addErrorListener(error_listener)
    tokens = CommonTokenStream(lexer)
    tokens.fill()
    if error_listener.n_errors:
        return None

    # Parsing phase -- abort on syntax errors.
    parser = jsgParser(tokens)
    parser.addErrorListener(error_listener)
    parse_tree = parser.doc()
    if error_listener.n_errors:
        return None

    # Semantic walk: reject documents that reference undefined tokens.
    parser = JSGDocParser()
    parser.visit(parse_tree)
    if parser.undefined_tokens():
        for tkn in parser.undefined_tokens():
            print("Undefined token: " + tkn)
        return None
    return parser.as_python(source)
Parse the text in infile and save the results in outfile :param input_: string or stream to parse :param source: source name for python file header :return: python text if successful
def parse(self, data):
    """Parse a 13 byte packet in the Status format.

    :param data: bytearray to be parsed
    :type data: bytearray
    :return: Data dictionary containing the parsed values
    :rtype: dict
    """
    self.validate_packet(data)

    # Fixed-offset header fields of the status packet.
    packet_length = data[0]
    packet_type = data[1]
    sub_type = data[2]
    sequence_number = data[3]
    command_type = data[4]
    transceiver_type = data[5]
    # Human-readable transceiver name; None when the code is unknown.
    transceiver_type_text = _MSG1_RECEIVER_TYPE.get(data[5])
    firmware_version = data[6]

    # Bytes 7-9 hold the protocol flag bits; flatten them into one list
    # (presumably one entry per bit -- confirm _int_to_binary_list).
    flags = self._int_to_binary_list(data[7])
    flags.extend(self._int_to_binary_list(data[8]))
    flags.extend(self._int_to_binary_list(data[9]))
    enabled, disabled = self._log_enabled_protocols(flags, PROTOCOLS)

    return {
        'packet_length': packet_length,
        'packet_type': packet_type,
        'packet_type_name': self.PACKET_TYPES.get(packet_type),
        'sequence_number': sequence_number,
        'sub_type': sub_type,
        'sub_type_name': self.PACKET_SUBTYPES.get(sub_type),
        'command_type': command_type,
        'transceiver_type': transceiver_type,
        'transceiver_type_text': transceiver_type_text,
        'firmware_version': firmware_version,
        'enabled_protocols': enabled,
        'disabled_protocols': disabled,
    }
Parse a 13 byte packet in the Status format. :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict
def setup_app(app, api):
    """Register the knowledge-base resource URLs on *api*."""
    routes = (
        (KnwKBAllResource, '/api/knowledge'),
        (KnwKBResource, '/api/knowledge/<string:slug>'),
        (KnwKBMappingsResource, '/api/knowledge/<string:slug>/mappings'),
        (KnwKBMappingsToResource, '/api/knowledge/<string:slug>/mappings/to'),
        (KnwKBMappingsFromResource,
         '/api/knowledge/<string:slug>/mappings/from'),
        (NotImplementedKnowledegeResource,
         '/api/knowledge/<string:slug>/<path:foo>'),
    )
    for resource, url in routes:
        api.add_resource(resource, url)
setup the resources urls.
def handlePosition(self, msg):
    """Handle position change messages from the broker."""
    self.log_msg("position", msg)

    # Canonical string key for the contract in the positions table.
    contract_tuple = self.contract_to_tuple(msg.contract)
    contract_string = self.contractString(contract_tuple)
    self.registerContract(msg.contract)

    account_positions = self._positions.setdefault(msg.account, {})
    account_positions[contract_string] = {
        "symbol": contract_string,
        "position": int(msg.pos),
        "avgCost": float(msg.avgCost),
        "account": msg.account,
    }

    self.ibCallback(caller="handlePosition", msg=msg)
handle positions changes
def _update(self, rules: list): self._rules = rules to_store = '\n'.join( rule.config_string for rule in rules ) sftp_connection = self._sftp_connection with sftp_connection.open(self.RULE_PATH, mode='w') as file_handle: file_handle.write(to_store)
Updates the given rules and stores them on the router.
def update_pulled_fields(instance, imported_instance, fields):
    """Update instance fields based on data imported from the backend.

    Saves the instance to the DB only if at least one field (or the error
    message) actually changed.
    """
    modified = False

    for field in fields:
        new_value = getattr(imported_instance, field)
        old_value = getattr(instance, field)
        if old_value == new_value:
            continue
        setattr(instance, field, new_value)
        logger.info("%s's with PK %s %s field updated from value '%s' to value '%s'",
                    instance.__class__.__name__, instance.pk, field,
                    old_value, new_value)
        modified = True

    # Propagate the backend error message, preferring the imported one.
    error_message = (getattr(imported_instance, 'error_message', '')
                     or getattr(instance, 'error_message', ''))
    if error_message and instance.error_message != error_message:
        instance.error_message = imported_instance.error_message
        modified = True

    if modified:
        instance.save()
Update instance fields based on data imported from the backend. Save changes to the DB only if one or more fields were changed.
def prepare_soap_body(self, method, parameters, namespace):
    """Prepare the SOAP message body for sending.

    Args:
        method (str): The name of the method to call.
        parameters (list): (name, value) tuples of parameters to pass.
        namespace (str): The XML namespace for the method, or None.

    Returns:
        str: A properly formatted SOAP Body.
    """
    # Render each parameter as <name>escaped-value</name>; double quotes
    # are additionally escaped so values are attribute-safe.
    rendered = [
        "<{name}>{value}</{name}>".format(
            name=name,
            value=escape("%s" % value, {'"': "&quot;"}))
        for name, value in parameters
    ]
    wrapped_params = "".join(rendered)

    if namespace is None:
        return '<{method}>{params}</{method}>'.format(
            method=method, params=wrapped_params)
    return '<{method} xmlns="{namespace}">{params}</{method}>'.format(
        method=method, params=wrapped_params, namespace=namespace)
Prepare the SOAP message body for sending. Args: method (str): The name of the method to call. parameters (list): A list of (name, value) tuples containing the parameters to pass to the method. namespace (str): The XML namespace to use for the method. Returns: str: A properly formatted SOAP Body.
def during(f):
    """Decorator factory: run *f* concurrently while the decorated function
    executes, joining the companion thread before returning the result.
    """
    def decorator(g):
        @wraps(g)
        def wrapper(*args, **kwargs):
            # Start f with the same arguments in a companion thread.
            companion = Thread(target=f, args=args, kwargs=kwargs)
            companion.start()
            result = g(*args, **kwargs)
            # Wait for f to finish before handing back g's result.
            companion.join()
            return result
        return wrapper
    return decorator
Runs f during the decorated function's execution in a separate thread.
def render(self):
    """Render both alignment orientations as a tuple of HTML fragments."""
    first, second = self._alignment
    return (self._format_alignment(first, second),
            self._format_alignment(second, first))
Returns a tuple of HTML fragments rendering each element of the sequence.
def send(self, url, **kwargs):
    """Send the payload to the configured server, if any.

    Delegates to the base client when a server URL is configured;
    otherwise logs an error and returns None.
    """
    if not self.config.server_url:
        self.error_logger.error(
            "No server configured, and elasticapm not installed. Cannot send message")
        return None
    return super(DjangoClient, self).send(url, **kwargs)
Serializes and signs ``data`` and passes the payload off to ``send_remote`` If ``server`` was passed into the constructor, this will serialize the data and pipe it to the server using ``send_remote()``.