code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_config_object():
    """Thread-safe lazy accessor for the default ConfigWrapper singleton."""
    global _DEFAULT_CONFIG_WRAPPER
    # Fast path: already initialised, no lock needed.
    if _DEFAULT_CONFIG_WRAPPER is not None:
        return _DEFAULT_CONFIG_WRAPPER
    with _DEFAULT_CONFIG_WRAPPER_LOCK:
        # Double-checked locking: another thread may have won the race
        # between the unlocked check above and acquiring the lock.
        if _DEFAULT_CONFIG_WRAPPER is not None:
            return _DEFAULT_CONFIG_WRAPPER
        _DEFAULT_CONFIG_WRAPPER = ConfigWrapper()
        return _DEFAULT_CONFIG_WRAPPER
Thread-safe accessor for the immutable default ConfigWrapper object
def create_cas_validate_url(cas_url, cas_route, service, ticket, renew=None):
    """Build a CAS service-validate URL.

    :param cas_url: base URL of the CAS server
    :param cas_route: route of the validate endpoint (e.g. /cas/serviceValidate)
    :param service: the service URL being validated
    :param ticket: the service ticket string
    :param renew: optional "true"/"false" renew flag
    """
    # NOTE(review): ``renew`` may be None here -- presumably ``create_url``
    # drops None-valued parameters; confirm against its implementation.
    return create_url(
        cas_url,
        cas_route,
        ('service', service),
        ('ticket', ticket),
        ('renew', renew),
    )
Create a CAS validate URL. Keyword arguments: cas_url -- The url to the CAS (ex. http://sso.pdx.edu) cas_route -- The route where the CAS lives on server (ex. /cas/serviceValidate) service -- (ex. http://localhost:5000/login) ticket -- (ex. 'ST-58274-x839euFek492ou832Eena7ee-cas') renew -- "true" or "false" Example usage: >>> create_cas_validate_url( ... 'http://sso.pdx.edu', ... '/cas/serviceValidate', ... 'http://localhost:5000/login', ... 'ST-58274-x839euFek492ou832Eena7ee-cas' ... ) 'http://sso.pdx.edu/cas/serviceValidate?service=http%3A%2F%2Flocalhost%3A5000%2Flogin&ticket=ST-58274-x839euFek492ou832Eena7ee-cas'
def _request_token(self, force=False):
    """Refresh the cached auth token via the ``account/RefreshToken`` endpoint.

    Returns True on success (or when no refresh was needed), False on any
    HTTP or payload failure. Raises RuntimeError when no token is cached.

    :param force: refresh even when the current token is still valid.
    """
    if self.login_data is None:
        raise RuntimeError("Don't have a token to refresh")
    if not force:
        # Skip the network round-trip while the current token is valid.
        if not self._requires_refresh_token():
            return True
    headers = {
        "Accept": "application/json",
        'Authorization': 'Bearer ' + self.login_data['token']['accessToken']
    }
    url = self.api_base_url + "account/RefreshToken"
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code != 200:
        return False
    refresh_data = response.json()
    if 'token' not in refresh_data:
        return False
    # NOTE(review): the guard above checks for a nested 'token' key, yet the
    # fields below are read from the top level of the payload -- confirm the
    # actual API response shape; one of the two is likely wrong.
    self.login_data['token']['accessToken'] = refresh_data['accessToken']
    self.login_data['token']['issuedOn'] = refresh_data['issuedOn']
    self.login_data['token']['expiresOn'] = refresh_data['expiresOn']
    return True
Request a new auth token
def reraise_if_any(failures, cause_cls_finder=None):
    """Re-raise exceptions when ``failures`` is non-empty.

    An empty list/tuple/iterator is a no-op. A single ``Failure`` is
    re-raised directly; multiple failures are wrapped in a
    :class:`WrappedFailure` raised with the whole list as causes.
    """
    if not isinstance(failures, (list, tuple)):
        failures = list(failures)
    count = len(failures)
    if count == 0:
        return
    if count == 1:
        failures[0].reraise(cause_cls_finder=cause_cls_finder)
        return
    raise WrappedFailure(failures)
Re-raise exceptions if argument is not empty. If argument is empty list/tuple/iterator, this method returns None. If argument is converted into a list with a single ``Failure`` object in it, that failure is reraised. Else, a :class:`~.WrappedFailure` exception is raised with the failure list as causes.
def _clear_policy(self, lambda_name):
    """Remove all existing policy statements from a Lambda function.

    Prevents the resource policy from growing past AWS's size limit across
    repeated deployments. A missing policy is treated as a first run.
    """
    try:
        policy_response = self.lambda_client.get_policy(
            FunctionName=lambda_name
        )
        if policy_response['ResponseMetadata']['HTTPStatusCode'] == 200:
            statement = json.loads(policy_response['Policy'])['Statement']
            for s in statement:
                delete_response = self.lambda_client.remove_permission(
                    FunctionName=lambda_name,
                    StatementId=s['Sid']
                )
                # remove_permission returns 204 No Content on success.
                if delete_response['ResponseMetadata']['HTTPStatusCode'] != 204:
                    logger.error('Failed to delete an obsolete policy statement: {}'.format(policy_response))
        else:
            logger.debug('Failed to load Lambda function policy: {}'.format(policy_response))
    except ClientError as e:
        # Matching on the stringified error: ResourceNotFoundException means
        # no policy exists yet.
        if e.args[0].find('ResourceNotFoundException') > -1:
            logger.debug('No policy found, must be first run.')
        else:
            logger.error('Unexpected client error {}'.format(e.args[0]))
Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.
def session_rollback(self, session):
    """SQLAlchemy ``after_rollback`` hook: emit the session_rollback signal.

    Sessions never seen by this tracker (no ``meepo_unique_id``) are
    skipped; tracked sessions are signalled and then forgotten.
    """
    if not hasattr(session, 'meepo_unique_id'):
        self.logger.debug("skipped - session_rollback")
        return
    self.logger.debug("%s - after_rollback" % session.meepo_unique_id)
    signal("session_rollback").send(session)
    self._session_del(session)
Send session_rollback signal in sqlalchemy ``after_rollback``. This marks the failure of session so the session may enter commit phase.
def norm(x, mu, sigma=1.0):
    """Evaluate the normal(mu, sigma) probability density at ``x``."""
    distribution = stats.norm(loc=mu, scale=sigma)
    return distribution.pdf(x)
Evaluate the probability density function of a normal distribution with mean ``mu`` and standard deviation ``sigma`` at ``x``, using ``scipy.stats``.
def disconnect_sync(self, conn_id):
    """Synchronously disconnect from a connected device.

    Blocks on the asynchronous disconnect until its callback fires.

    Args:
        conn_id (int): identifier of the connection to tear down.

    Returns:
        dict: {'success': status reported by the callback,
               'failure_reason': reason string when it failed}
    """
    done = threading.Event()
    result = {}

    def disconnect_done(conn_id, adapter_id, status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()

    self.disconnect_async(conn_id, disconnect_done)
    # NOTE(review): no timeout -- this blocks forever if the callback is
    # never invoked.
    done.wait()
    return result
Synchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection Returns: dict: A dictionary with two elements 'success': a bool with the result of the connection attempt 'failure_reason': a string with the reason for the failure if we failed
def get_byte_array(integer):
    """Return the minimal big-endian byte string encoding the non-negative
    integer (empty bytes for 0)."""
    n_bytes = (integer.bit_length() + 7) // 8
    return integer.to_bytes(n_bytes, byteorder='big', signed=False)
Return the variable length bytes corresponding to the given int
def get_book_info(cursor, real_dict_cursor, book_id, book_version, page_id, page_version):
    """Return info (title, id, authors, ...) about the book containing a page.

    Raises HTTPNotFound when the page is not part of the book's tree.
    """
    book_ident_hash = join_ident_hash(book_id, book_version)
    page_ident_hash = join_ident_hash(page_id, page_version)
    tree = get_tree(book_ident_hash, cursor)
    if not tree or page_ident_hash not in flatten_tree_to_ident_hashes(tree):
        raise httpexceptions.HTTPNotFound()
    # BUG(review): ``sql_statement`` is used before assignment -- the SQL
    # query text appears to have been lost from this source; executing this
    # will raise NameError. Restore the original query string.
    sql_statement = real_dict_cursor.execute(sql_statement, vars=(book_ident_hash,))
    return real_dict_cursor.fetchone()
Return information about a given book. Return the book's title, id, shortId, authors and revised date. Raise HTTPNotFound if the page is not in the book.
def use_theme(theme):
    """Make ``theme`` the current theme and restyle the current scene.

    ``scene`` is imported lazily, presumably to avoid a circular import --
    TODO confirm.
    """
    global current
    current = theme
    import scene
    if scene.current is not None:
        scene.current.stylize()
Make the given theme current. There are two included themes: light_theme, dark_theme.
def search_certificate(self, hash):
    """Look up a certificate by hash via the Censys certificates API.

    :param hash: certificate hash (note: parameter name shadows the builtin
        ``hash``; kept for interface compatibility)
    :return: dict with the certificate record
    """
    c = CensysCertificates(api_id=self.__uid, api_secret=self.__api_key)
    return c.view(hash)
Searches for a specific certificate using its hash :param hash: certificate hash :type hash: str :return: dict
def _extra_compile_time_classpath(self): def extra_compile_classpath_iter(): for conf in self._confs: for jar in self.extra_compile_time_classpath_elements(): yield (conf, jar) return list(extra_compile_classpath_iter())
Compute any extra compile-time-only classpath elements.
async def _watchdog(self, timeout):
    """Fire the watchdog after ``timeout`` seconds, then run the callback.

    Cancels the watchdog bookkeeping before invoking the callback.
    """
    # NOTE(review): the ``loop`` keyword of asyncio.sleep was removed in
    # Python 3.10; as written this only runs on older interpreters.
    await asyncio.sleep(timeout, loop=self.loop)
    _LOGGER.debug("Watchdog triggered!")
    await self.cancel_watchdog()
    await self._watchdog_cb()
Trigger and cancel the watchdog after timeout. Call callback.
def is_file(dirname):
    """argparse type-checker: the given path must be an existing regular file.

    Returns the path unchanged on success, raises ArgumentTypeError otherwise.
    """
    if os.path.isfile(dirname):
        return dirname
    raise argparse.ArgumentTypeError(
        "{0} is not an existing file".format(dirname))
Checks if a path is an actual file that exists
def pid(name):
    """Return the PID of container ``name`` (from ``info``'s 'PID' field).

    Raises CommandExecutionError when the PID is missing (TypeError from
    ``int(None)``) or non-numeric (ValueError).
    """
    try:
        return int(info(name).get('PID'))
    except (TypeError, ValueError) as exc:
        raise CommandExecutionError(
            'Unable to get PID for container \'{0}\': {1}'.format(name, exc)
        )
Returns the PID of a container name Container name CLI Example: .. code-block:: bash salt myminion nspawn.pid arch1
def get_scope_path(self, scope_separator="::"):
    """Build the namespace path of this component's declaration scope.

    Returns "" for components with no parent or whose parent is the Root;
    otherwise the parent's path joined with the parent's type name.

    Parameters
    ----------
    scope_separator: str
        Separator inserted between namespace scopes.
    """
    parent = self.parent_scope
    if parent is None or isinstance(parent, Root):
        return ""
    prefix = parent.get_scope_path(scope_separator)
    if prefix:
        return prefix + scope_separator + parent.type_name
    return parent.type_name
Generate a string that represents this component's declaration namespace scope. Parameters ---------- scope_separator: str Override the separator between namespace scopes
def modify_column_if_table_exists(self, tablename: str, fieldname: str, newdef: str) -> Optional[int]:
    """ALTER a column's definition (MySQL ``MODIFY COLUMN``) without renaming.

    Returns None when the table does not exist, otherwise the result of
    executing the ALTER statement.
    """
    if not self.table_exists(tablename):
        return None
    sql = "ALTER TABLE {t} MODIFY COLUMN {field} {newdef}".format(
        t=tablename, field=fieldname, newdef=newdef
    )
    # NOTE(review): identifiers are interpolated directly into the SQL --
    # callers must not pass untrusted table/field names.
    log.info(sql)
    return self.db_exec_literal(sql)
Alters a column's definition without renaming it.
def clean_resource_json(resource_json):
    """Shrink a resource's JSON for the catalog by dropping unneeded fields.

    Mutates and returns the same dict.
    """
    top_level_noise = ('parent_docname', 'parent', 'template', 'repr',
                       'series')
    for key in top_level_noise:
        resource_json.pop(key, None)
    props = resource_json['props']
    prop_noise = ('acquireds', 'style', 'in_nav', 'nav_title', 'weight',
                  'auto_excerpt')
    for key in prop_noise:
        props.pop(key, None)
    return resource_json
Shrink a resource's JSON for the catalog by removing fields the catalog does not need, both at the top level and inside ``props``.
def _validate_timeout(seconds: float): val = int(seconds * 1000) assert 60000 <= val <= 4294967294, "Bad value: {}".format(val) return val
Creates an int from 60000 to 4294967294 that represents a valid millisecond wireless LAN timeout
def union(self, other, renorm=True):
    """Merge ``other`` into this Region by set-union of their pixel dicts.

    Pixels of ``other`` at depths beyond our own maxdepth are degraded to
    their ancestor pixel at ``self.maxdepth``.

    Parameters
    ----------
    other : :class:`AegeanTools.regions.Region`
        The region to be combined.
    renorm : bool
        Perform renormalisation after the operation? Default = True.
    """
    # Depths common to both regions can be added directly.
    for d in range(1, min(self.maxdepth, other.maxdepth) + 1):
        self.add_pixels(other.pixeldict[d], d)
    if self.maxdepth < other.maxdepth:
        for d in range(self.maxdepth + 1, other.maxdepth + 1):
            for p in other.pixeldict[d]:
                # BUGFIX: use integer division -- ``/`` yields a float under
                # Python 3, producing an invalid (non-int) pixel index.
                pp = p // 4 ** (d - self.maxdepth)
                self.pixeldict[self.maxdepth].add(pp)
    if renorm:
        self._renorm()
    return
Add another Region by performing union on their pixlists. Parameters ---------- other : :class:`AegeanTools.regions.Region` The region to be combined. renorm : bool Perform renormalisation after the operation? Default = True.
def runGetContinuousSet(self, id_):
    """Run a getContinuousSet request for the specified compound ID."""
    compoundId = datamodel.ContinuousSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    continuousSet = dataset.getContinuousSet(id_)
    return self.runGetRequest(continuousSet)
Runs a getContinuousSet request for the specified ID.
def _produce_return(self, cursor): results = cursor.fetchall() if self._row_formatter is not None: return (self._row_formatter(r, cursor) for r in results) return results
Get the rows from the cursor and apply the row formatter. :return: sequence of rows, or a generator if a row formatter has to be applied
def upload_file_boto(fname, remote_fname, mditems=None):
    """Upload ``fname`` to S3 at ``remote_fname`` using boto.

    Creates the bucket if missing and forces AES256 server-side encryption
    unless the caller supplied their own encryption metadata item.
    """
    r_fname = objectstore.parse_remote(remote_fname)
    conn = objectstore.connect(remote_fname)
    bucket = conn.lookup(r_fname.bucket)
    if not bucket:
        bucket = conn.create_bucket(r_fname.bucket, location=objectstore.get_region(remote_fname))
    key = bucket.get_key(r_fname.key, validate=False)
    if mditems is None:
        mditems = {}
    if "x-amz-server-side-encryption" not in mditems:
        mditems["x-amz-server-side-encryption"] = "AES256"
    for name, val in mditems.items():
        key.set_metadata(name, val)
    key.set_contents_from_filename(fname, encrypt_key=True)
Upload a file using boto instead of external tools.
def create():
    """Create the Movies database from the films in ARGS.directory.

    Exits with an error message when any given directory is missing.
    """
    if not all(map(os.path.isdir, ARGS.directory)):
        exit('Error: One or more of the specified directories does not exist.')
    with sqlite3.connect(ARGS.database) as connection:
        connection.text_factory = str
        cursor = connection.cursor()
        cursor.execute('DROP TABLE IF EXISTS Movies')
        # BUG(review): the CREATE TABLE statement is missing from this source
        # -- the call below executes nothing and the INSERTs will fail.
        # Restore the schema SQL.
        cursor.execute( )
        for dir in ARGS.directory:
            cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)', local_data(dir))
Create a new database with information about the films in the specified directory or directories.
def _row_resized(self, row, old_height, new_height):
    """Keep the data table's row height in sync after a resize.

    ``old_height`` is unused but required by the resize-signal signature --
    presumably a Qt slot; confirm against the connecting signal.
    """
    self.dataTable.setRowHeight(row, new_height)
    self._update_layout()
Update the row height.
def clean_structure(self, out_suffix='_clean', outdir=None, force_rerun=False,
                    remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True,
                    add_atom_occ=True, remove_res_hetero=True, keep_chemicals=None,
                    keep_res_only=None, add_chain_id_if_empty='X', keep_chains=None):
    """Clean this structure's file and save the result as a new file.

    All keyword arguments are forwarded unchanged to ``cleanpdb.clean_pdb``;
    see that function for their meaning. Returns the path to the cleaned
    PDB file, or None (with an error logged) when no structure file is set.
    """
    if not self.structure_file:
        log.error('{}: no structure file, unable to clean'.format(self.id))
        return None
    clean_pdb_file = ssbio.protein.structure.utils.cleanpdb.clean_pdb(
        self.structure_path, out_suffix=out_suffix, outdir=outdir,
        force_rerun=force_rerun, remove_atom_alt=remove_atom_alt,
        remove_atom_hydrogen=remove_atom_hydrogen,
        keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ,
        remove_res_hetero=remove_res_hetero, keep_chemicals=keep_chemicals,
        keep_res_only=keep_res_only,
        add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains)
    return clean_pdb_file
Clean the structure file associated with this structure, and save it as a new file. Returns the file path. Args: out_suffix (str): Suffix to append to original filename outdir (str): Path to output directory force_rerun (bool): If structure should be re-cleaned if a clean file exists already remove_atom_alt (bool): Remove alternate positions keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep remove_atom_hydrogen (bool): Remove hydrogen atoms add_atom_occ (bool): Add atom occupancy fields if not present remove_res_hetero (bool): Remove all HETATMs keep_chemicals (str, list): If removing HETATMs, keep specified chemical names keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else! add_chain_id_if_empty (str): Add a chain ID if not present keep_chains (str, list): Keep only these chains Returns: str: Path to cleaned PDB file
def fire_ret_load(self, load):
    """Fire events derived from a job return load.

    Only failed returns (truthy retcode) are considered. Handles both the
    single-function case and multi-function jobs, where ``retcode`` may be
    a list (ordered, parallel to ``fun``) or a dict keyed by function name.
    """
    if load.get('retcode') and load.get('fun'):
        if isinstance(load['fun'], list):
            # Multi-function job: retcode may be ordered (list) or a mapping.
            if isinstance(load['retcode'], list):
                multifunc_ordered = True
            else:
                multifunc_ordered = False
            for fun_index in range(0, len(load['fun'])):
                fun = load['fun'][fun_index]
                if multifunc_ordered:
                    if (len(load['retcode']) > fun_index and
                            load['retcode'][fun_index] and
                            fun in SUB_EVENT):
                        self._fire_ret_load_specific_fun(load, fun_index)
                else:
                    if load['retcode'].get(fun, 0) and fun in SUB_EVENT:
                        self._fire_ret_load_specific_fun(load, fun_index)
        else:
            # Single-function job.
            if load['fun'] in SUB_EVENT:
                self._fire_ret_load_specific_fun(load)
Fire events based on information in the return load
def output_file_name(self):
    """Filename for this plugin's output.

    'results_' + the source path with ':' and '/' replaced by '_' + the
    plugin's output file extension.
    """
    safe_path = re.sub(r":|/", "_", self.source_urn.Path().lstrip("/"))
    return "results_%s%s" % (safe_path, self.output_file_extension)
Name of the file where plugin's output should be written to.
def addRow(self, *row):
    """Add a row to the table; every cell is converted to ``str``.

    Column widths are grown to fit: the width list is extended when the row
    has more cells than any previous row, and short rows are padded so the
    element-wise max below lines up.
    """
    row = [ str(item) for item in row ]
    len_row = [ len(item) for item in row ]
    width = self.__width
    len_old = len(width)
    len_new = len(row)
    # NOTE(review): ``known`` is computed but never used.
    known = min(len_old, len_new)
    missing = len_new - len_old
    if missing > 0:
        # New columns: adopt their widths directly.
        width.extend( len_row[ -missing : ] )
    elif missing < 0:
        # Short row: pad its length list with zeros.
        len_row.extend( [0] * (-missing) )
    self.__width = [ max( width[i], len_row[i] ) for i in compat.xrange(len(len_row)) ]
    self.__cols.append(row)
Add a row to the table. All items are converted to strings. @type row: tuple @keyword row: Each argument is a cell in the table.
def patch():
    """Monkey-patch Twisted's Service start/stopService with state asserts.

    Debug aid only: startService asserts the service is not already running
    and stopService asserts it is, before delegating to the originals.
    """
    from twisted.application.service import Service
    old_startService = Service.startService
    old_stopService = Service.stopService

    def startService(self):
        assert not self.running, "%r already running" % (self,)
        return old_startService(self)

    def stopService(self):
        assert self.running, "%r already stopped" % (self,)
        return old_stopService(self)

    Service.startService = startService
    Service.stopService = stopService
Patch startService and stopService so that they check the previous state first. (used for debugging only)
def transform_sources(self, sources, with_string=False):
    """Rewrite each source's function bodies, collecting replaced strings
    and functions, then build the definitions they need.

    Returns the updated ``sources`` mapping and the built function defs.
    NOTE(review): ``with_string`` is never read -- presumably kept for
    interface compatibility; confirm with callers.
    """
    modules = {}
    updater = partial(
        self.replace_source, modules=modules, prefix='string_')
    for filename in sources:
        updated = update_func_body(sources[filename], updater)
        # Prepend the extern/segment boilerplate to every rewritten source.
        sources[filename] = EXTERN_AND_SEG + updated
    logging.debug('modules: %s', modules)
    return sources, self.build_funcs(modules)
Get the defintions of needed strings and functions after replacement.
def get_url(self, *paths, **params):
    """Build the full request URL.

    :param paths: additional URL path parts appended to the accumulated
        attribute path
    :param params: additional query parameters; call-time values override
        the stored ``self._params``
    """
    path_stack = self._attribute_stack[:]
    if paths:
        path_stack.extend(paths)
    u = self._stack_collapser(path_stack)
    url = self._url_template % {
        "domain": self._api_url,
        "generated_url" : u,
    }
    if self._params or params:
        internal_params = self._params.copy()
        internal_params.update(params)
        url += self._generate_params(internal_params)
    return url
Returns the URL for this request. :param paths: Additional URL path parts to add to the request :param params: Additional query parameters to add to the request
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
    """Create a contribution for each author newly added to ``instance``.

    Only acts on the 'pre_add' action; every other action is ignored.
    Signature matches an m2m-changed style signal handler -- presumably
    Django's ``m2m_changed``.
    """
    if action != 'pre_add':
        return
    for author in model.objects.filter(pk__in=pk_set):
        update_content_contributions(instance, author)
Creates a contribution for each author added to an article.
def _registerHandler(self, handler):
    """Attach ``handler`` to the logger and track it for later removal.

    :param handler: a logging handler object.
    """
    self._logger.addHandler(handler)
    self._handlers.append(handler)
Registers a handler. :param handler: A handler object.
def get_line_break_property(value, is_bytes=False):
    """Get the `LINE BREAK` property table entry for ``value``.

    A leading '^' negates the property; names are resolved through the
    'linebreak' alias map before lookup.
    """
    obj = unidata.ascii_line_break if is_bytes else unidata.unicode_line_break
    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['linebreak'].get(negated, negated)
    else:
        value = unidata.unicode_alias['linebreak'].get(value, value)
    return obj[value]
Get `LINE BREAK` property.
def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
    """Serialise Wilson coefficients ``C_out`` at ``scale_out`` in WCxf form.

    ``fmt`` may be 'yaml' (default) or 'json'; when ``stream`` is given the
    output is written there instead of returned as a string.
    """
    wc = self.get_wcxf(C_out, scale_out)
    return wc.dump(fmt=fmt, stream=stream, **kwargs)
Return a string representation of the Wilson coefficients `C_out` in WCxf format. If `stream` is specified, export it to a file. `fmt` defaults to `yaml`, but can also be `json`. Note that the Wilson coefficients are rotated into the Warsaw basis as defined in WCxf, i.e. to the basis where the down-type and charged lepton mass matrices are diagonal.
def get_random(self):
    """Return a random statement from the database.

    Raises ``EmptyDatabaseException`` when there are no statements.
    """
    import random
    Statement = self.get_model('statement')
    session = self.Session()
    try:
        count = self.count()
        if count < 1:
            raise self.EmptyDatabaseException()
        random_index = random.randrange(0, count)
        random_statement = session.query(Statement)[random_index]
        statement = self.model_to_object(random_statement)
    finally:
        # BUGFIX: the session was leaked when the database was empty or a
        # query raised; always close it.
        session.close()
    return statement
Returns a random statement from the database.
def get_partial_contenthandler(element):
    """Build a `PartialLIGOLWContentHandler` that reads only ``element``.

    Table subclasses are matched via their ``CheckProperties`` classmethod;
    other elements are matched by tag name.

    Parameters
    ----------
    element : `type`, subclass of :class:`~ligo.lw.ligolw.Element`
        the element class to be read

    Returns
    -------
    contenthandler : `type`
        a content-handler subclass reading only the given element
    """
    from ligo.lw.ligolw import PartialLIGOLWContentHandler
    from ligo.lw.table import Table
    if issubclass(element, Table):
        def _element_filter(name, attrs):
            return element.CheckProperties(name, attrs)
    else:
        def _element_filter(name, _):
            return name == element.tagName
    return build_content_handler(PartialLIGOLWContentHandler, _element_filter)
Build a `PartialLIGOLWContentHandler` to read only this element Parameters ---------- element : `type`, subclass of :class:`~ligo.lw.ligolw.Element` the element class to be read, Returns ------- contenthandler : `type` a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler` to read only the given `element`
def components(accountable, project_key):
    """Print a table of a project's components (id, name, self columns)."""
    components = accountable.project_components(project_key)
    headers = sorted(['id', 'name', 'self'])
    # Keep only the header columns from each component, in sorted key order
    # so they align with the sorted headers.
    rows = [[v for k, v in sorted(component.items()) if k in headers]
            for component in components]
    rows.insert(0, headers)
    print_table(SingleTable(rows))
Returns a list of all a project's components.
def getfile(data_name, path):
    """Download a single file ``path`` from dataset ``data_name``.

    Exits early (with a hint about running jobs) when the data source
    cannot be resolved.
    """
    data_source = get_data_object(data_name, use_data_config=False)
    if not data_source:
        if 'output' in data_name:
            floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.")
        sys.exit()
    url = "{}/api/v1/resources/{}/{}?content=true".format(floyd.floyd_host, data_source.resource_id, path)
    fname = os.path.basename(path)
    DataClient().download(url, filename=fname)
    floyd_logger.info("Download finished")
Download a specific file from a dataset.
def find_types(self, site=None, match=r'^(?!lastfile|spectro|\.).*'):
    """Return the list of known data types (FFL basenames).

    Optionally filtered by ``site`` and a regex ``match`` (pass None to
    disable regex filtering).
    """
    self._find_paths()
    types = [tag for (site_, tag) in self.paths if site in (None, site_)]
    if match is not None:
        match = re.compile(match)
        return list(filter(match.search, types))
    return types
Return the list of known data types. This is just the basename of each FFL file found in the FFL directory (minus the ``.ffl`` extension)
def reset(self):
    """Rewind the cursor to the start of the root GameTree, ``self.game``."""
    self.gametree = self.game
    self.nodenum = 0
    self.index = 0
    self.stack = []
    self.node = self.gametree[self.index]
    self._setChildren()
    self._setFlags()
Set 'Cursor' to point to the start of the root 'GameTree', 'self.game'.
def namespace(self, namespace, to=None):
    """Filter the queryset by ApphookConfig namespace.

    The relation field is guessed from the model unless ``to`` names it
    explicitly; missing or ambiguous relations raise ValueError.
    """
    fields = get_apphook_field_names(self.model)
    if not fields:
        raise ValueError(
            ugettext(
                'Can\'t find any relation to an ApphookConfig model in {0}'
            ).format(self.model.__name__)
        )
    if to and to not in fields:
        raise ValueError(
            ugettext(
                'Can\'t find relation to ApphookConfig model named '
                '"{0}" in "{1}"'
            ).format(to, self.model.__name__)
        )
    if len(fields) > 1 and to not in fields:
        raise ValueError(
            ugettext(
                '"{0}" has {1} relations to an ApphookConfig model.'
                ' Please, specify which one to use in argument "to".'
                ' Choices are: {2}'
            ).format(
                self.model.__name__, len(fields), ', '.join(fields)
            )
        )
    else:
        if not to:
            # Unambiguous case: use the only relation found.
            to = fields[0]
    lookup = '{0}__namespace'.format(to)
    kwargs = {lookup: namespace}
    return self.filter(**kwargs)
Filter by namespace. Try to guess which field to use in lookup. Accept 'to' argument if you need to specify.
def askForFolder(parent, msg = None):
    """Open a folder-selection dialog, remembering the last chosen path.

    The last path is stored in plugin settings keyed by the calling method.

    :param parent: the parent window
    :param msg: dialog title (defaults to 'Select folder')
    """
    msg = msg or 'Select folder'
    caller = _callerName().split(".")
    name = "/".join([LAST_PATH, caller[-1]])
    namespace = caller[0]
    path = pluginSetting(name, namespace)
    folder = QtWidgets.QFileDialog.getExistingDirectory(parent, msg, path)
    if folder:
        # Remember the selection for the next invocation from this method.
        setPluginSetting(name, folder, namespace)
    return folder
Asks for a folder, opening the corresponding dialog with the last path that was selected when this same function was invoked from the calling method :param parent: The parent window :param msg: The message to use for the dialog title
def unmarshall_value(self, value):
    """Unmarshall a pickled Crash object read from the database.

    Optionally hex-unescapes and decompresses the stored blob first.
    WARNING: ``pickle.loads`` on database content executes arbitrary code
    if the database is not trusted.
    """
    value = str(value)
    if self.escapeValues:
        # NOTE(review): ``str.decode('hex')`` is Python 2 only; this path
        # breaks under Python 3 when escapeValues is set.
        value = value.decode('hex')
    if self.compressValues:
        value = zlib.decompress(value)
    value = pickle.loads(value)
    return value
Unmarshalls a Crash object read from the database. @type value: str @param value: Object to convert. @rtype: L{Crash} @return: Converted object.
def probe(self, axis: str, distance: float) -> Dict[str, float]:
    """Probe along ``axis`` for ``distance`` via the smoothie driver and
    return the resulting position dict."""
    return self._smoothie_driver.probe_axis(axis, distance)
Run a probe and return the new position dict
def ensure_size(self, size=None):
    """Evict least-frequent elements until ``sys.getsizeof(self)`` fits
    within ``size`` (defaults to ``self.size_constraint``).

    Note: ``sys.getsizeof`` is shallow -- this bounds the container object
    itself, not the total memory of its elements.
    """
    limit = self.size_constraint if size is None else size
    while sys.getsizeof(self) > limit:
        frequencies = collections.Counter(self)
        least_common = frequencies.most_common()[-1][0]
        self.remove(least_common)
This function removes the least frequent elements until the size constraint is met.
def files(self):
    """Return the union of all files across every fileset in the chroot."""
    return set().union(*self.filesets.values())
Get all files in the chroot.
def encode_datetime(o):
    """Encode a datetime as an ECMA-262 string: millisecond precision,
    'Z' suffix in place of '+00:00'."""
    text = o.isoformat()
    if o.microsecond:
        # Keep milliseconds only: drop characters 23..25 (last 3 digits of
        # the microsecond field), preserving any tz suffix.
        text = text[:23] + text[26:]
    return text[:-6] + 'Z' if text.endswith('+00:00') else text
Encodes a Python datetime.datetime object as an ECMA-262 compliant datetime string.
def image(self):
    """|Image| object for the picture's embedded image.

    Raises ValueError when the shape has no embedded image (blip rId is
    None).
    """
    slide_part, rId = self.part, self._element.blip_rId
    if rId is None:
        raise ValueError('no embedded image')
    return slide_part.get_image(rId)
An |Image| object providing access to the properties and bytes of the image in this picture shape.
def estimate_bg(self, fit_offset="mean", fit_profile="tilt", border_px=0,
                from_mask=None, ret_mask=False):
    """Estimate the image background and store it under the "fit" key.

    The previous fitted background is cleared first; the estimation
    arguments are recorded as attributes alongside the new background.
    Returns the estimation mask when ``ret_mask`` is True.
    """
    # Drop any previous fit before estimating anew.
    self.set_bg(bg=None, key="fit")
    bgimage, mask = bg_estimate.estimate(data=self.image,
                                         fit_offset=fit_offset,
                                         fit_profile=fit_profile,
                                         border_px=border_px,
                                         from_mask=from_mask,
                                         ret_mask=True)
    attrs = {"fit_offset": fit_offset,
             "fit_profile": fit_profile,
             "border_px": border_px}
    self.set_bg(bg=bgimage, key="fit", attrs=attrs)
    self["estimate_bg_from_mask"] = from_mask
    if ret_mask:
        return mask
Estimate image background Parameters ---------- fit_profile: str The type of background profile to fit: - "offset": offset only - "poly2o": 2D 2nd order polynomial with mixed terms - "tilt": 2D linear tilt with offset (default) fit_offset: str The method for computing the profile offset - "fit": offset as fitting parameter - "gauss": center of a gaussian fit - "mean": simple average - "mode": mode (see `qpimage.bg_estimate.mode`) border_px: float Assume that a frame of `border_px` pixels around the image is background. from_mask: boolean np.ndarray or None Use a boolean array to define the background area. The mask image must have the same shape as the input data.`True` elements are used for background estimation. ret_mask: bool Return the mask image used to compute the background. Notes ----- If both `border_px` and `from_mask` are given, the intersection of the two resulting mask images is used. The arguments passed to this method are stored in the hdf5 file `self.h5` and are used for optional integrity checking using `qpimage.integrity_check.check`. See Also -------- qpimage.bg_estimate.estimate
def create_metadata(self, **params):
    """POST media metadata (e.g. image alt text) to the Twitter upload API.

    ``params`` are JSON-encoded into the request body.
    """
    params = json.dumps(params)
    return self.post("https://upload.twitter.com/1.1/media/metadata/create.json", params=params)
Adds metadata to a media element, such as image descriptions for visually impaired. Docs: https://developer.twitter.com/en/docs/media/upload-media/api-reference/post-media-metadata-create
def get_last_scene_time(self, refresh=False):
    """Return the cached 'LastSceneTime' value.

    When ``refresh`` is True the value is re-fetched from the device first
    (unnecessary when using subscriptions).
    """
    if refresh:
        self.refresh_complex_value('LastSceneTime')
    val = self.get_complex_value('LastSceneTime')
    return val
Get last scene time. Refresh data from Vera if refresh is True, otherwise use local cache. Refresh is only needed if you're not using subscriptions.
def _check_email_changed(cls, username, email):
    """Return True when the SeAT-side email for ``username`` differs from
    ``email``."""
    ret = cls.exec_request('user/{}'.format(username), 'get', raise_for_status=True)
    return ret['email'] != email
Compares email to one set on SeAT
def histogram(self, bmus=None):
    """Return a 2D histogram counting best-match units per map cell.

    :param bmus: best-match unit indexes; defaults to the stored
        ``self._bmus`` (requires a trained SOM).
    :returns: numpy array of shape (nrows, ncols) with the counts.
    """
    if bmus is None:
        assert self._bmus is not None, 'not trained'
        bmus = self._bmus
    counts = np.zeros((self._som.nrows, self._som.ncols))
    for row, col in bmus:
        counts[row, col] += 1
    return counts
\ Return a 2D histogram of bmus. :param bmus: the best-match units indexes for underlying data. :type bmus: :class:`numpy.ndarray` :returns: the computed 2D histogram of bmus. :rtype: :class:`numpy.ndarray`
def getReliableListeners(self):
    """Yield the listeners registered with this batch processor."""
    for rellist in self.store.query(_ReliableListener, _ReliableListener.processor == self):
        yield rellist.listener
Return an iterable of the listeners which have been added to this batch processor.
def _raw_aspera_metadata(self, bucket):
    """Fetch Aspera connection details for an Aspera-enabled bucket.

    Returns (access_key_id, access_key_secret, ats_endpoint).
    """
    response = self._client.get_bucket_aspera(Bucket=bucket)
    aspera_access_key = response['AccessKey']['Id']
    aspera_secret_key = response['AccessKey']['Secret']
    ats_endpoint = response['ATSEndpoint']
    return aspera_access_key, aspera_secret_key, ats_endpoint
get the Aspera connection details on Aspera enabled buckets
def get(self, query, sort, page, size):
    """List communities matching ``query``, paginated, with link headers.

    Returns a JSON response containing the requested page plus pagination
    'link' headers built from the pagination factory.
    """
    urlkwargs = {
        'q': query,
        'sort': sort,
        'size': size,
    }
    communities = Community.filter_communities(query, sort)
    page = communities.paginate(page, size)
    links = default_links_pagination_factory(page, urlkwargs)
    # One ('link', ...) header tuple per pagination relation.
    links_headers = map(lambda key: ('link', 'ref="{0}" href="{1}"'.format(
        key, links[key])), links)
    return self.make_response(
        page,
        headers=links_headers,
        links_item_factory=default_links_item_factory,
        page=page,
        urlkwargs=urlkwargs,
        links_pagination_factory=default_links_pagination_factory,
    )
Get a list of all the communities. .. http:get:: /communities/(string:id) Returns a JSON list with all the communities. **Request**: .. sourcecode:: http GET /communities HTTP/1.1 Accept: application/json Content-Type: application/json Host: localhost:5000 :reqheader Content-Type: application/json **Response**: .. sourcecode:: http HTTP/1.0 200 OK Content-Length: 334 Content-Type: application/json [ { "id": "comm1" }, { "id": "comm2" } ] :resheader Content-Type: application/json :statuscode 200: no error
def on_click(self, button, **kwargs):
    """Dispatch an i3bar click event to its configured callback.

    Buttons 1-5 map to leftclick/middleclick/rightclick/upscroll/downscroll;
    anything else is 'otherclick'. Double clicks are detected and, when a
    double-click handler exists, the single-click action is delayed via a
    timer so the double can still cancel it.

    :param button: i3bar button ID.
    :param kwargs: extra event info (e.g. mouse coordinates) passed through.
    """
    actions = ['leftclick', 'middleclick', 'rightclick', 'upscroll',
               'downscroll']
    try:
        action = actions[button - 1]
    except (TypeError, IndexError):
        self.__log_button_event(button, None, None, "Other button")
        action = "otherclick"
    m_click = self.__multi_click
    with m_click.lock:
        double = m_click.check_double(button)
        double_action = 'double%s' % action
        if double:
            action = double_action
        cb = getattr(self, 'on_%s' % action, None)
        double_handler = getattr(self, 'on_%s' % double_action, None)
        # Delay a single click only while a double-click handler could
        # still fire for this button.
        delay_execution = (not double and double_handler)
        if delay_execution:
            m_click.set_timer(button, cb, **kwargs)
        else:
            self.__button_callback_handler(button, cb, **kwargs)
Maps a click event with its associated callback. Currently implemented events are: ============ ================ ========= Event Callback setting Button ID ============ ================ ========= Left click on_leftclick 1 Middle click on_middleclick 2 Right click on_rightclick 3 Scroll up on_upscroll 4 Scroll down on_downscroll 5 Others on_otherclick > 5 ============ ================ ========= The action is determined by the nature (type and value) of the callback setting in the following order: 1. If null callback (``None``), no action is taken. 2. If it's a `python function`, call it and pass any additional arguments. 3. If it's name of a `member method` of current module (string), call it and pass any additional arguments. 4. If the name does not match with `member method` name execute program with such name. .. seealso:: :ref:`callbacks` for more information about callback settings and examples. :param button: The ID of button event received from i3bar. :param kwargs: Further information received from i3bar like the positions of the mouse where the click occured. :return: Returns ``True`` if a valid callback action was executed. ``False`` otherwise.
def _match_registers(self, query):
    """Return a status register's value when ``query`` names one, else None.

    :param query: message tuple
    :return: the register's response, or None when no register matches
    """
    if query in self._status_registers:
        register = self._status_registers[query]
        response = register.value
        logger.debug('Found response in status register: %s', repr(response))
        # Reading a status register clears it.
        register.clear()
        return response
Tries to match in status registers :param query: message tuple :type query: Tuple[bytes] :return: response if found or None :rtype: Tuple[bytes] | None
def get_perm_codename(perm, fail_silently=True):
    """Strip the leading ``app_label.`` from a permission string.

    A string without a dot is returned unchanged; when ``fail_silently`` is
    False, the missing dot re-raises the underlying IndexError instead.
    """
    try:
        return perm.split('.', 1)[1]
    except IndexError:
        if not fail_silently:
            raise
    return perm
Get permission codename from permission-string. Examples -------- >>> get_perm_codename('app_label.codename_model') 'codename_model' >>> get_perm_codename('app_label.codename') 'codename' >>> get_perm_codename('codename_model') 'codename_model' >>> get_perm_codename('codename') 'codename' >>> get_perm_codename('app_label.app_label.codename_model') 'app_label.codename_model'
def add_item(self, item):
    """Register a single command line flag after validating its fields.

    Raises TypeError when name/description are not strings or flag_type is
    not a FlagType. Flags with a default other than None/False get
    ' (default: %(default)s)' appended to their description. Re-adding an
    existing flag name is a no-op.
    """
    if not (isinstance(item.name, basestring)
            and isinstance(item.description, basestring)):
        raise TypeError("Name and description should be strings, are of type {} and {}"
                        .format(type(item.name), type(item.description)))
    if not isinstance(item.flag_type, FlagType):
        raise TypeError("Flag type should be of type FlagType, is of {}".format(type(item.flag_type)))
    if item.name in self._flags:
        return
    if item.default is not None and item.default is not False:
        item.description = item.description + " (default: %(default)s)"
    self._flags[item.name] = item
Add single command line flag Arguments: name (:obj:`str`): Name of flag used in command line flag_type (:py:class:`snap_plugin.v1.plugin.FlagType`): Indication if flag should store value or is simple bool flag description (:obj:`str`): Flag description used in command line default (:obj:`object`, optional): Optional default value for flag Raises: TypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError
def _cache_key_select_sample_type(method, self, allow_blank=True, multiselect=False, style=None):
    """Cache key for ``select_sample_type``: the memoised value is recomputed
    whenever the update timer or any of the widget options change."""
    key = update_timer(), allow_blank, multiselect, style
    return key
This function returns the key used to decide if method select_sample_type has to be recomputed
def ddns(self, domain_id, record_id, sub_domain, record_line, value):
    """Dynamic-DNS update: POST only when the record actually changed.

    Returns None without contacting the API when the record's current
    sub_domain, line and value already match the requested ones.

    :param str domain_id: Domain ID
    :param str record_id: Record ID
    :param str sub_domain: sub domain (e.g. **www** of **www.google.com**)
    :param str record_line: line of the record
    :param str value: the record's value, an IP address
    """
    record = self.info(domain_id, record_id)
    if record.sub_domain == sub_domain and \
            record.record_line == record_line and \
            record.value == value:
        return
    self._api.do_post('Record.Ddns', domain_id=domain_id,
                      record_id=record_id, sub_domain=sub_domain,
                      record_line=record_line, value=value)
Update record's value dynamically If the ``value`` is different from the record's current value, then perform a dynamic record update. Otherwise, nothing will be done. :param str domain_id: Domain ID :param str record_id: Record ID :param str sub_domain: Sub domain of domain (e.g., **www** of **www.google.com**) :param str record_line: Line of the record :param str value: The record's value, an IP address
def write(filename, mesh, fmt_version, write_binary=True):
    """Write a Gmsh msh file.

    ``fmt_version`` may be a full version string ("2.2") or just a major
    version ("2"); a full version falls back to its major-version writer
    when no exact match is registered.
    """
    writer = _writers.get(fmt_version)
    if writer is None:
        # Fall back to the major-version writer, e.g. "2.2" -> "2".
        writer = _writers.get(fmt_version.split(".")[0])
    if writer is None:
        raise ValueError(
            "Need mesh format in {} (got {})".format(
                sorted(_writers.keys()), fmt_version
            )
        )
    writer.write(filename, mesh, write_binary=write_binary)
Writes a Gmsh msh file.
def delete_files_in_folder(fldr):
    """Delete all files in folder *fldr*.

    Each matching file is passed to ``delete_file`` with its second
    argument set to True, as before.

    NOTE(review): the glob pattern ``'*.*'`` only matches names that
    contain a dot, so extensionless files are left untouched --
    preserved from the original behavior; confirm this is intended.
    """
    # Build the pattern with os.path.join instead of manual os.sep
    # concatenation.
    for path in glob.glob(os.path.join(fldr, '*.*')):
        delete_file(path, True)
delete all files in folder 'fldr'
def check_syntax(string):
    """Check the syntax of a string of PostgreSQL-dialect SQL.

    Runs the SQL through the ``ecpg`` preprocessor.

    :returns: ``(True, "")`` on success, or ``(False, parsed_error)``
        when ecpg reports a syntax error.
    :raises OSError: when the ``ecpg`` binary cannot be executed.
    """
    args = ["ecpg", "-o", "-", "-"]
    try:
        # subprocess.DEVNULL replaces the original manual
        # open(os.devnull) context manager.
        proc = subprocess.Popen(args, shell=False,
                                stdout=subprocess.DEVNULL,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        _, err = proc.communicate(string)
    except OSError:
        # Fixed: the original message ended with a stray apostrophe.
        msg = "Unable to execute 'ecpg', you likely need to install it."
        raise OSError(msg)
    if proc.returncode == 0:
        return (True, "")
    return (False, parse_error(err))
Check syntax of a string of PostgreSQL-dialect SQL
def get_index_mappings(self, index):
    """Collect the field mappings for every doc type of *index*.

    :param index: mapping of doc-type name to doc-type definition.
    :returns: a flat list of all doc-type field mappings, or None as
        soon as any doc type yields no mapping.
    """
    fields_arr = []
    # The original iterated iteritems() but ignored the value and
    # re-looked up index[key]; iterate the values directly instead
    # (works identically on Python 2 and 3).
    for doc_type in index.values():
        doc_mapping = self.get_doc_type_mappings(doc_type)
        if doc_mapping is None:
            return None
        fields_arr.extend(doc_mapping)
    return fields_arr
Converts all index's doc_types to .kibana
def load(cls, path):
    """Load a SOM from a JSON file saved with this package.

    Parameters
    ----------
    path : str
        The path to the JSON file.

    Returns
    -------
    s : cls
        A som of the specified class.
    """
    # Context manager closes the file handle; the original leaked it
    # via json.load(open(path)).
    with open(path) as f:
        data = json.load(f)
    weights = np.asarray(data['weights'], dtype=np.float64)
    s = cls(data['map_dimensions'],
            data['params']['lr']['orig'],
            data['data_dimensionality'],
            influence=data['params']['infl']['orig'],
            lr_lambda=data['params']['lr']['factor'],
            infl_lambda=data['params']['infl']['factor'])
    s.weights = weights
    s.trained = True
    return s
Load a SOM from a JSON file saved with this package.

Parameters
----------
path : str
    The path to the JSON file.

Returns
-------
s : cls
    A som of the specified class.
def _http_request(url, request_timeout=None):
    """
    PRIVATE METHOD
    Fetch JSON results from the Solr API at *url*.

    url : str
        A complete URL that can be passed to urllib.open.
    request_timeout : int (None)
        The number of seconds before the request should time out.  When
        None, falls back to the __opts__['solr.request_timeout'] config
        option.

    Return: dict<str,obj>::

        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
    """
    _auth(url)
    try:
        # Bug fix: the original unconditionally overwrote the caller's
        # request_timeout with the config option, so an explicit timeout
        # argument was silently ignored.
        if request_timeout is None:
            request_timeout = __salt__['config.option']('solr.request_timeout')
        kwargs = {} if request_timeout is None else {'timeout': request_timeout}
        data = salt.utils.json.load(_urlopen(url, **kwargs))
        return _get_return_dict(True, data, [])
    except Exception as err:
        # Best-effort contract: any failure is reported inside the
        # returned dict rather than raised.
        return _get_return_dict(False, {}, ["{0} : {1}".format(url, err)])
PRIVATE METHOD Uses salt.utils.json.load to fetch the JSON results from the solr API. url : str a complete URL that can be passed to urllib.open request_timeout : int (None) The number of seconds before the timeout should fail. Leave blank/None to use the default. __opts__['solr.request_timeout'] Return: dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
def create_polynoms():
    """Create and return poly1d objects.

    Reads the slope/intercept parameters shipped with the package and
    builds one poly1d per Janus resonance order, keyed e.g.
    ``'janus 6:5'``.
    """
    fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
    params = pd.read_csv(fname)
    polys = {}
    for row, resorder in enumerate('65 54 43 21'.split()):
        slope = params.loc[row, 'Slope (km/yr)']
        intercept = params.loc[row, 'Intercept (km)']
        polys['janus ' + ':'.join(resorder)] = poly1d([slope, intercept])
    return polys
Create and return poly1d objects. Uses the parameters from Morgan to create poly1d objects for calculations.
def implemented_methods(cls):
    """Return a mapping of implemented HTTP methods vs. their callbacks.

    Computed once from ``cls.callbacks`` and cached on the class; later
    calls return the cached mapping.
    """
    if not cls.__implemented_methods:
        cls.__implemented_methods = {
            op: callback
            for callback in cls.callbacks
            for op in getattr(callback, 'swagger_ops')
        }
    return cls.__implemented_methods
Return a mapping of implemented HTTP methods vs. their callbacks.
def challenge_auth(username, password, challenge, lower, digest='sha256'):
    """Calculate quakenet's challenge auth hash.

    .. code-block:: python

        >>> challenge_auth("mooking", "0000000000",
        ...     "12345678901234567890123456789012", str.lower, "md5")
        '2ed1a1f1d2cd5487d2e18f27213286b9'
    """
    fdigest = get_digest(digest)

    def hexdigest_of(data):
        return fdigest(data).hexdigest()

    lowered_user = lower(username)
    # Only the first 10 password characters take part in the hash.
    truncated_pass = password[:10].encode("ascii")
    inner_hash = hexdigest_of(truncated_pass)
    key_material = hexdigest_of(
        "{0}:{1}".format(lowered_user, inner_hash).encode("ascii"))
    mac = hmac.HMAC(key_material.encode("ascii"),
                    challenge.encode("ascii"),
                    digestmod=fdigest)
    return mac.hexdigest()
Calculates quakenet's challenge auth hash .. code-block:: python >>> challenge_auth("mooking", "0000000000", ... "12345678901234567890123456789012", str.lower, "md5") '2ed1a1f1d2cd5487d2e18f27213286b9'
def add_primary_text(self, item_url, primary_text):
    """Add the given primary text to the cache database, replacing any
    existing record for the same item.

    :type item_url: String or Item
    :param item_url: the URL of the corresponding item, or an Item object
    :type primary_text: String
    :param primary_text: the item's primary text
    """
    url = str(item_url)
    cursor = self.conn.cursor()
    # Delete-then-insert implements the "update if present" contract.
    cursor.execute("DELETE FROM primary_texts WHERE item_url=?", (url,))
    self.conn.commit()
    cursor.execute("INSERT INTO primary_texts VALUES (?, ?, ?)",
                   (url, primary_text, self.__now_iso_8601()))
    self.conn.commit()
    cursor.close()
Add the given primary text to the cache database, updating the existing record if the primary text is already present :type item_url: String or Item :param item_url: the URL of the corresponding item, or an Item object :type primary_text: String :param primary_text: the item's primary text
def _copy_scratch_to_state(args: Dict[str, Any]):
    """Copy the scratch shard's contents into the state shard in place."""
    # np.copyto writes into the destination array, so the state shard is
    # mutated rather than rebound.
    np.copyto(_state_shard(args), _scratch_shard(args))
Copies scratch shards to state shards.
def parsePositionFile(filename):
    """Parse an Android GPS logger csv file and return a list of row
    dictionaries, each augmented with a formatted 'strtime' field."""
    rows = []
    with open(filename, "rb") as csv_file:
        for row in csv.DictReader(csv_file):
            parsed = dateparser.parse(row['time'])
            row['strtime'] = parsed.strftime("%d %b %Y, %H:%M UTC")
            rows.append(row)
    return rows
Parses Android GPS logger csv file and returns list of dictionaries
def excise(self, ngrams, replacement):
    """Return the token content of this text with every occurrence of
    each n-gram in `ngrams` replaced with `replacement`.

    The replacing is performed by descending order of n-gram length, so
    longer n-grams cannot be broken up by shorter replacements.

    :param ngrams: n-grams to be replaced
    :type ngrams: `list` of `str`
    :param replacement: replacement string
    :type replacement: `str`
    :rtype: `str`
    """
    content = self.get_token_content()
    # sorted() instead of list.sort(): the original mutated the caller's
    # ngrams list in place as a side effect.
    for ngram in sorted(ngrams, key=len, reverse=True):
        content = content.replace(ngram, replacement)
    return content
Returns the token content of this text with every occurrence of each n-gram in `ngrams` replaced with `replacement`. The replacing is performed on each n-gram by descending order of length. :param ngrams: n-grams to be replaced :type ngrams: `list` of `str` :param replacement: replacement string :type replacement: `str` :rtype: `str`
def pack(self):
    """Pack the service code for transmission.

    The 10-bit service number and 6-bit attribute are combined into one
    16-bit little-endian value. Returns a 2 byte string.
    """
    # Local import and explicit struct.pack: this method's own name
    # shadowed the module-level `pack` alias it relied on, which was
    # confusing and fragile.
    import struct
    number, attribute = self.number, self.attribute
    return struct.pack("<H", (number & 0x3ff) << 6 | (attribute & 0x3f))
Pack the service code for transmission. Returns a 2 byte string.
def postIncidents(self, name, message, status, visible, **kwargs):
    """Create a new incident.

    :param name: Name of the incident
    :param message: A message (supporting Markdown) to explain more.
    :param status: Status of the incident.
    :param visible: Whether the incident is publicly visible.
    :param component_id: (optional) Component to update.
    :param component_status: (optional) The status to update the given component with.
    :param notify: (optional) Whether to notify subscribers.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # Fold the required fields into the optional keyword payload.
    kwargs.update(name=name, message=message, status=status, visible=visible)
    return self.__postRequest('/incidents', kwargs)
Create a new incident. :param name: Name of the incident :param message: A message (supporting Markdown) to explain more. :param status: Status of the incident. :param visible: Whether the incident is publicly visible. :param component_id: (optional) Component to update. :param component_status: (optional) The status to update the given component with. :param notify: (optional) Whether to notify subscribers. :return: :class:`Response <Response>` object :rtype: requests.Response
def _get_disksize_MiB(iLOIP, cred):
    """Read the dictionary of parsed MIBs and collect the disk sizes.

    :param iLOIP: IP address of the server on which SNMP discovery has
        to be executed.
    :param cred: SNMP credentials, passed through to ``_parse_mibs``.
    :returns: dict mapping each uuid to a dict of
        {phy-drive-size-key: size-string} for all physical drives.
    """
    result = _parse_mibs(iLOIP, cred)
    disksize = {}
    for uuid in sorted(result):
        for key in result[uuid]:
            if key.find('PhyDrvSize') >= 0:
                # Bug fix: the original re-created disksize[uuid] = dict()
                # for every matching key, wiping the sizes already
                # collected from earlier keys of the same uuid.
                sizes = disksize.setdefault(uuid, {})
                for suffix in sorted(result[uuid][key]):
                    # NOTE(review): as in the original, a later suffix
                    # overwrites an earlier one for the same key --
                    # confirm only one suffix per key is expected.
                    sizes[key] = str(result[uuid][key][suffix])
    return disksize
Reads the dictionary of parsed MIBs and gets the disk size. :param iLOIP: IP address of the server on which SNMP discovery has to be executed. :param snmp_credentials in a dictionary having following mandatory keys. auth_user: SNMP user auth_protocol: Auth Protocol auth_prot_pp: Pass phrase value for AuthProtocol. priv_protocol:Privacy Protocol. auth_priv_pp: Pass phrase value for Privacy Protocol. :returns the dictionary of disk sizes of all physical drives.
def process_request(self, request, client_address):
    """Fork a new subprocess to process the request; the parent returns
    immediately while the child handles the request and exits."""
    # Reap any finished child processes before forking a new one.
    self.collect_children()
    pid = os.fork()
    if pid:
        # Parent process: record the child's pid and release our copy
        # of the request socket (the child holds its own reference).
        if self.active_children is None:
            self.active_children = []
        self.active_children.append(pid)
        self.close_request(request)
        return
    else:
        # Child process: handle the request, then terminate with
        # os._exit so the forked interpreter copy skips normal
        # shutdown (atexit hooks, buffered-stream flushing).
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
            os._exit(0)
        except:
            # Bare except is deliberate: whatever goes wrong, the child
            # must report the error and still exit via os._exit(1).
            try:
                self.handle_error(request, client_address)
                self.shutdown_request(request)
            finally:
                os._exit(1)
Fork a new subprocess to process the request.
def getVersion():
    """Print the epochs code version."""
    version = '{}.{}.{}'.format(CDFepoch.version, CDFepoch.release, CDFepoch.increment)
    print('epochs version:', version)
Shows the code version.
def guess_lexer_using_filename(file_name, text):
    """Guess a lexer for the given text, limited to lexers registered
    for this file's extension.

    Returns a (lexer, accuracy) tuple; either element may be None.
    """
    lexer = None
    accuracy = None

    try:
        lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
    except SkipHeartbeat as ex:
        raise SkipHeartbeat(u(ex))
    except:
        # Bare except preserved: any lexer-guessing failure is logged
        # and treated as "no lexer".
        log.traceback(logging.DEBUG)

    if lexer is not None:
        try:
            accuracy = lexer.analyse_text(text)
        except:
            log.traceback(logging.DEBUG)

    return lexer, accuracy
Guess lexer for given text, limited to lexers for this file's extension. Returns a tuple of (lexer, accuracy).
def get_sequence(self, chrom, start, end, strand=None):
    """Retrieve a sequence slice [start, end) of chromosome *chrom*.

    :param chrom: chromosome name (key into the per-chrom file tables).
    :param start: 0-based start position; must be in [0, total_size].
    :param end: end position; must be <= total_size.
    :param strand: when "-", the reverse complement is returned.
    :raises ValueError: when start/end fall outside the sequence.
    """
    if not self.index_dir:
        print("Index dir is not defined!")
        sys.exit()
    fasta_file = self.fasta_file[chrom]
    index_file = self.index_file[chrom]
    line_size = self.line_size[chrom]
    total_size = self.size[chrom]
    if start > total_size:
        raise ValueError(
            "Invalid start {0}, greater than sequence length {1} of {2}!".format(start, total_size, chrom))
    if start < 0:
        raise ValueError("Invalid start, < 0!")
    if end > total_size:
        raise ValueError(
            "Invalid end {0}, greater than sequence length {1} of {2}!".format(end, total_size, chrom))
    # Context managers close both files even if _read raises; the
    # original leaked the handles on any exception inside _read.
    with open(index_file, "rb") as index, open(fasta_file) as fasta:
        seq = self._read(index, fasta, start, end, line_size)
    if strand == "-":
        seq = rc(seq)
    return seq
Retrieve a sequence
def to_json(self):
    """Serialize the object to JSON, wrapped in an outer object keyed by
    the schema title (the SalesKing schema stores the type there)."""
    body = json.dumps(self)
    title = self.schema['title']
    return u'{"%s":%s}' % (title, body)
Serialize the object to JSON and remove the internal bookkeeping fields.
Note that the SalesKing schema stores the type in the title.
def inxsearch(self, r, g, b):
    """Search the colormap for values 0..255 and return the index of the
    entry with the smallest squared distance to (r, g, b)."""
    target = np.array([r, g, b])
    deltas = self.colormap[:, :3] - target
    return np.argmin(np.sum(deltas * deltas, axis=1))
Search for BGR values 0..255 and return colour index
def get(self, pid, record, **kwargs):
    """Get a record.

    Permissions: ``read_permission_factory``

    Procedure description:

    #. The record is resolved reading the pid value from the url.
    #. The ETag and If-Modifed-Since is checked.
    #. The HTTP response is built with the help of the link factory.

    :param pid: Persistent identifier for record.
    :param record: Record object.
    :returns: The requested record.
    """
    etag = str(record.revision_id)
    self.check_etag(etag)
    self.check_if_modified_since(record.updated, etag=etag)
    return self.make_response(pid, record, links_factory=self.links_factory)
Get a record. Permissions: ``read_permission_factory`` Procedure description: #. The record is resolved reading the pid value from the url. #. The ETag and If-Modifed-Since is checked. #. The HTTP response is built with the help of the link factory. :param pid: Persistent identifier for record. :param record: Record object. :returns: The requested record.
def _has_manual_kern_feature(font):
    """Return True if the GSFont contains a manually written 'kern'
    feature (i.e. one not marked automatic)."""
    for feature in font.features:
        if feature.name == "kern" and not feature.automatic:
            return True
    return False
Return true if the GSFont contains a manually written 'kern' feature.
def create_or_get_keypair(self, nova, keypair_name="testkey"):
    """Create a new keypair, or return the existing one if it already
    exists."""
    try:
        existing = nova.keypairs.get(keypair_name)
    except Exception:
        # Any lookup failure is treated as "keypair missing".
        self.log.debug('Keypair ({}) does not exist, '
                       'creating it.'.format(keypair_name))
        return nova.keypairs.create(name=keypair_name)
    self.log.debug('Keypair ({}) already exists, '
                   'using it.'.format(keypair_name))
    return existing
Create a new keypair, or return pointer if it already exists.
def _open(file,mode='copyonwrite'):
    """Open a FITS format file with pyfits, falling back to _open_fix()
    when the header doesn't verify correctly.

    Every HDU in the result is passed through strip_pad() before it is
    returned.
    """
    import pyfits
    try:
        infits=pyfits.open(file,mode)
        hdu=infits
    except (ValueError,pyfits.VerifyError,pyfits.FITS_SevereError):
        # Header failed verification; retry with the lenient fixer.
        # NOTE(review): this 'import sys' appears unused -- confirm and
        # remove in a future cleanup.
        import sys
        hdu=_open_fix(file)
    for f in hdu:
        strip_pad(f)
    return hdu
Opens a FITS format file and calls _open_fix if header doesn't verify correctly.
def _get_decision_trees_bulk(self, payload, valid_indices, invalid_indices, invalid_dts):
    """Tool for the function get_decision_trees_bulk.

    :param list payload: information necessary for getting the trees,
        in the same form as for get_decision_trees_bulk.
    :param list valid_indices: indices of the valid agent ids.
    :param list invalid_indices: indices of the invalid agent ids.
    :param list invalid_dts: the invalid agent ids.

    :return: decision trees.
    :rtype: list of dict.
    """
    valid_payload = [payload[i] for i in valid_indices]
    url = "{}/bulk/decision_tree".format(self._base_url)
    valid_dts = self._create_and_send_json_bulk(valid_payload, url, "POST")
    if invalid_indices == []:
        return valid_dts
    # Interleave valid results with the invalid placeholders at their
    # original positions.
    return self._recreate_list_with_indices(valid_indices, valid_dts,
                                            invalid_indices, invalid_dts)
Tool for the function get_decision_trees_bulk.

:param list payload: contains the information necessary for getting
  the trees. Its form is the same as for the function
  get_decision_trees_bulk.
:param list valid_indices: list of the indices of the valid agent ids.
:param list invalid_indices: list of the indices of the invalid agent ids.
:param list invalid_dts: list of the invalid agent ids.

:return: decision trees.
:rtype: list of dict.
def nl_socket_modify_err_cb(sk, kind, func, arg):
    """Modify the error callback handler associated with the socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L649

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    kind -- kind of callback (integer).
    func -- callback function.
    arg -- argument to be passed to the callback function.

    Returns:
    0 on success or a negative error code.
    """
    # Thin wrapper: delegate to nl_cb_err on the socket's callback set.
    return int(nl_cb_err(sk.s_cb, kind, func, arg))
Modify the error callback handler associated with the socket. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L649 Positional arguments: sk -- Netlink socket (nl_sock class instance). kind -- kind of callback (integer). func -- callback function. arg -- argument to be passed to callback function. Returns: 0 on success or a negative error code.
def _get_keywords(self, location, keywords):
    """Format GET request's parameters from keywords.

    Normalizes user-facing aliases (``id``, ``start``, ``end``,
    ``sort``, ``xml``) into the API's parameter names and appends the
    API key.
    """
    if 'xml' in keywords:
        keywords.pop('xml')
        self.xml = True
    else:
        keywords['file_type'] = 'json'
    if 'id' in keywords:
        if location != 'series':
            # NOTE(review): rstrip('s') strips *all* trailing 's'
            # characters ("releases" -> "release", but also
            # "categories" -> "categorie") -- preserved as-is; confirm
            # it is correct for every location value used.
            location = location.rstrip('s')
        keywords['%s_id' % location] = keywords.pop('id')
    for alias, param in (('start', 'realtime_start'),
                         ('end', 'realtime_end'),
                         ('sort', 'sort_order')):
        if alias in keywords:
            keywords[param] = keywords.pop(alias)
    keywords['api_key'] = self.api_key
    return keywords
Format GET request's parameters from keywords.
def _remove_exts(self, string):
    """Strip a recognized image-format extension from *string*,
    recording the requested format on ``self.format``.

    'jpg' is normalized to 'jpeg'; unrecognized strings are returned
    unchanged.
    """
    known_exts = ('.png', '.gif', '.jpg', '.bmp', '.jpeg', '.ppm', '.datauri')
    if string.lower().endswith(known_exts):
        dot = string.rfind('.')
        fmt = string[dot + 1:]
        if fmt.lower() == 'jpg':
            fmt = 'jpeg'
        self.format = fmt
        string = string[:dot]
    return string
Strips any recognized image-file extension from the string used to create the Robohash, recording the requested output format.
def deallocate_network_ipv4(self, id_network_ipv4):
    """Deallocate all relationships between NetworkIPv4.

    :param id_network_ipv4: ID for NetworkIPv4

    :return: Nothing

    :raise InvalidParameterError: Invalid ID for NetworkIPv4.
    :raise NetworkIPv4NotFoundError: NetworkIPv4 not found.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_network_ipv4):
        raise InvalidParameterError(
            u'The identifier of NetworkIPv4 is invalid or was not informed.')

    url = 'network/ipv4/{0}/deallocate/'.format(id_network_ipv4)
    code, xml = self.submit(None, 'DELETE', url)
    return self.response(code, xml)
Deallocate all relationships between NetworkIPv4. :param id_network_ipv4: ID for NetworkIPv4 :return: Nothing :raise InvalidParameterError: Invalid ID for NetworkIPv4. :raise NetworkIPv4NotFoundError: NetworkIPv4 not found. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
async def run_asgi(self):
    """Wrapper around the ASGI callable, handling exceptions and
    unexpected termination states."""
    try:
        result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
    except BaseException as exc:
        self.closed_event.set()
        msg = "Exception in ASGI application\n"
        self.logger.error(msg, exc_info=exc)
        if not self.handshake_started_event.is_set():
            # App crashed before the websocket handshake began, so a
            # plain HTTP 500 can still be sent.
            self.send_500_response()
        else:
            # Handshake already in flight; wait until it completes
            # before tearing the connection down.
            await self.handshake_completed_event.wait()
        self.transport.close()
    else:
        self.closed_event.set()
        if not self.handshake_started_event.is_set():
            # App returned without ever starting the handshake --
            # treated as a server error.
            msg = "ASGI callable returned without sending handshake."
            self.logger.error(msg)
            self.send_500_response()
            self.transport.close()
        elif result is not None:
            # Non-None return value violates the ASGI contract; log it,
            # then close once the handshake has completed.
            msg = "ASGI callable should return None, but returned '%s'."
            self.logger.error(msg, result)
            await self.handshake_completed_event.wait()
            self.transport.close()
Wrapper around the ASGI callable, handling exceptions and unexpected termination states.
def info(gandi):
    """Display information about the hosting account."""
    display_keys = ['handle', 'credit', 'prepaid']
    account = gandi.account.all()
    balance = gandi.contact.balance()
    account['prepaid_info'] = balance.get('prepaid', {})
    output_account(gandi, account, display_keys)
    return account
Display information about hosting account.
def package_version(package_name: str) -> typing.Optional[str]:
    """Return the installed package's version as a string, or None if it
    couldn't be found."""
    try:
        dist = pkg_resources.get_distribution(package_name)
        return dist.version
    except (pkg_resources.DistributionNotFound, AttributeError):
        return None
Returns package version as a string, or None if it couldn't be found.
def multiply_encrypted_to_plaintext(public, encrypted, plaintext, output):
    """Multiply encrypted num with unencrypted num.

    Requires a PUBLIC key file, a number ENCRYPTED with that public key
    also as a file, and the PLAINTEXT number to multiply.

    Creates a new encrypted number.
    """
    log("Loading public key")
    pub = load_public_key(json.load(public))

    log("Loading encrypted number")
    enc = load_encrypted_number(encrypted, pub)

    log("Loading unencrypted number")
    multiplier = float(plaintext)

    log("Multiplying")
    print(serialise_encrypted(enc * multiplier), file=output)
Multiply encrypted num with unencrypted num. Requires a PUBLIC key file, a number ENCRYPTED with that public key also as a file, and the PLAINTEXT number to multiply. Creates a new encrypted number.