code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
    """
    Include the pagination template and data for persisting querystring
    in pagination links.

    ``exclude_vars`` may be a comma separated string of var names in the
    current querystring to exclude from the pagination links.
    """
    params = context["request"].GET.copy()
    # vars to strip from the persisted querystring: caller-supplied ones
    # plus the page var itself
    skip = [name for name in exclude_vars.split(",") if name]
    skip.append(page_var)
    for name in skip:
        if name in params:
            del params[name]
    return {
        "current_page": current_page,
        "querystring": params.urlencode(),
        "page_var": page_var,
    }
Include the pagination template and data for persisting querystring in pagination links. Can also contain a comma separated string of var names in the current querystring to exclude from the pagination links, via the ``exclude_vars`` arg.
def arquire_attributes(self, attributes, active=True):
    """
    Claims a list of attributes for the current client. Can also disable
    attributes. Returns update response object.
    """
    # NOTE(review): ``active`` is accepted but never used here — presumably
    # consumed server-side or vestigial; confirm against the API.
    response = self._post_object(self.update_api.attributes.acquire, attributes)
    return ExistAttributeResponse(response)
Claims a list of attributes for the current client. Can also disable attributes. Returns update response object.
def conditional_gate(control: Qubit, gate0: Gate, gate1: Gate) -> Gate:
    """Return a conditional unitary gate: apply ``gate0`` to the target
    qubits when ``control`` is zero, else apply ``gate1``."""
    assert gate0.qubits == gate1.qubits  # FIXME
    # Project control onto |0> with gate0, onto |1> with gate1, and sum.
    combined = join_gates(P0(control), gate0).tensor
    combined = combined + join_gates(P1(control), gate1).tensor
    return Gate(tensor=combined, qubits=[control, *gate0.qubits])
Return a conditional unitary gate. Do gate0 on bit 1 if bit 0 is zero, else do gate1 on 1
def transmission_rate(self):
    """
    Return the (upstream, downstream) rates as a tuple in bytes per
    second, averaged since the previous call. Intended for periodic
    polling.
    """
    sent = self.bytes_sent
    received = self.bytes_received
    now = time.time()
    elapsed = now - self.last_traffic_call
    upstream = int(1.0 * (sent - self.last_bytes_sent) / elapsed)
    downstream = int(1.0 * (received - self.last_bytes_received) / elapsed)
    # remember this sample as the baseline for the next call
    self.last_bytes_sent = sent
    self.last_bytes_received = received
    self.last_traffic_call = now
    return upstream, downstream
Returns the upstream, downstream values as a tuple in bytes per second. Use this for periodical calling.
def encode_request(request_line, **headers):
    '''Creates the data for a SSDP request.

    Args:
        request_line (string): The request line for the request
            (e.g. ``"M-SEARCH * HTTP/1.1"``).
        headers (dict of string -> string): Header name / header value
            pairs to present in the request.

    Returns:
        bytes: The encoded request.
    '''
    parts = [request_line]
    for name, value in headers.items():
        parts.append('%s: %s' % (name, value))
    # HTTP-style framing: CRLF between lines, blank line at the end
    message = '\r\n'.join(parts) + '\r\n\r\n'
    return message.encode('utf-8')
Creates the data for a SSDP request. Args: request_line (string): The request line for the request (e.g. ``"M-SEARCH * HTTP/1.1"``). headers (dict of string -> string): Dictionary of header name - header value pairs to present in the request. Returns: bytes: The encoded request.
def runGetReference(self, id_):
    """
    Runs a getReference request for the specified ID.
    """
    compound_id = datamodel.ReferenceCompoundId.parse(id_)
    repository = self.getDataRepository()
    reference_set = repository.getReferenceSet(compound_id.reference_set_id)
    # NOTE(review): the reference lookup uses the raw ``id_`` rather than a
    # field of the parsed compound id — presumably intentional; confirm.
    reference = reference_set.getReference(id_)
    return self.runGetRequest(reference)
Runs a getReference request for the specified ID.
def status(**connection_args):
    '''
    Return the status of a MySQL server using the output from the
    ``SHOW STATUS`` query.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.status
    '''
    dbc = _connect(**connection_args)
    if dbc is None:
        return {}

    cur = dbc.cursor()
    try:
        _execute(cur, 'SHOW STATUS')
    except OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
        return {}

    # SHOW STATUS yields (Variable_name, Value) rows
    return {row[0]: row[1]
            for row in (cur.fetchone() for _ in range(cur.rowcount))}
Return the status of a MySQL server using the output from the ``SHOW STATUS`` query. CLI Example: .. code-block:: bash salt '*' mysql.status
def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
    """Non-differentiable Intersection over Union (IoU) for comparing the
    similarity of two batch of data, usually be used for evaluating binary
    image segmentation. The coefficient between 0 to 1, and 1 means totally
    match.

    Parameters
    -----------
    output : tensor
        A batch of distribution with shape: [batch_size, ....], (any dimensions).
    target : tensor
        The target distribution, format the same with `output`.
    threshold : float
        The threshold value to be true.
    axis : tuple of integer
        All dimensions are reduced, default ``(1,2,3)``.
    smooth : float
        This small value will be added to the numerator and denominator, see ``dice_coe``.

    Notes
    ------
    - IoU cannot be used as training loss, people usually use dice
      coefficient for training, IoU and hard-dice for evaluating.
    """
    pred_mask = tf.cast(output > threshold, dtype=tf.float32)
    true_mask = tf.cast(target > threshold, dtype=tf.float32)
    # logical AND: both masks set
    intersection = tf.reduce_sum(tf.multiply(pred_mask, true_mask), axis=axis)
    # logical OR: at least one mask set
    union = tf.reduce_sum(tf.cast(tf.add(pred_mask, true_mask) >= 1, dtype=tf.float32), axis=axis)
    # smoothing keeps empty masks from producing 0/0
    batch_iou = (intersection + smooth) / (union + smooth)
    return tf.reduce_mean(batch_iou, name='iou_coe')
Non-differentiable Intersection over Union (IoU) for comparing the similarity of two batch of data, usually be used for evaluating binary image segmentation. The coefficient between 0 to 1, and 1 means totally match. Parameters ----------- output : tensor A batch of distribution with shape: [batch_size, ....], (any dimensions). target : tensor The target distribution, format the same with `output`. threshold : float The threshold value to be true. axis : tuple of integer All dimensions are reduced, default ``(1,2,3)``. smooth : float This small value will be added to the numerator and denominator, see ``dice_coe``. Notes ------ - IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.
def human_size(size):
    """
    Return a human-readable representation of a byte size.

    @param size: Number of bytes as an integer or string (parsed base 10).
    @return: String of length 10 with the formatted result.
    """
    # Previously tested against six's ``string_types``; plain ``str``
    # removes the external dependency with identical py3 behavior.
    if isinstance(size, str):
        size = int(size, 10)
    if size < 0:
        return "-??? bytes"
    if size < 1024:
        return "%4d bytes" % size
    for unit in ("KiB", "MiB", "GiB"):
        size /= 1024.0
        if size < 1024:
            return "%6.1f %s" % (size, unit)
    # Fallback for >= 1 TiB: value keeps growing but stays labelled GiB,
    # matching the historical output.
    return "%6.1f GiB" % size
Return a human-readable representation of a byte size. @param size: Number of bytes as an integer or string. @return: String of length 10 with the formatted result.
def is_analysis_edition_allowed(self, analysis_brain):
    """Returns if the analysis passed in can be edited by the current user
    :param analysis_brain: Brain that represents an analysis
    :return: True if the user can edit the analysis, otherwise False
    """
    # Analyses inside a deactivated context (e.g. a deactivated Analysis
    # Request) are never editable.
    if not self.context_active:
        return False

    analysis_obj = api.get_object(analysis_brain)
    if analysis_obj.getPointOfCapture() == 'field':
        # Captured on field, during sampling
        if not self.has_permission(EditFieldResults, analysis_obj):
            return False
    elif not self.has_permission(EditResults, analysis_obj):
        # Point of Capture is 'lab' and the user cannot edit lab analyses
        return False

    # Check if the user is allowed to enter a value to the Result field
    if not self.has_permission(FieldEditAnalysisResult, analysis_obj):
        return False

    # A result can be assigned if no instrument is set or the assigned
    # instrument is valid; otherwise manual result entry must be allowed.
    if not self.is_analysis_instrument_valid(analysis_brain):
        return analysis_obj.getManualEntryOfResults()
    return True
Returns if the analysis passed in can be edited by the current user :param analysis_brain: Brain that represents an analysis :return: True if the user can edit the analysis, otherwise False
def get_feature(self, ds, feat):
    """Return filtered feature data

    The features are filtered according to the user-defined filters,
    using the information in `ds._filter`. In addition, all `nan` and
    `inf` values are purged.

    Parameters
    ----------
    ds: dclab.rtdc_dataset.RTDCBase
        The dataset containing the feature
    feat: str
        The name of the feature; must be a scalar feature
    """
    if ds.config["filtering"]["enable filters"]:
        values = ds[feat][ds._filter]
    else:
        values = ds[feat]
    # drop nan/inf entries
    finite = ~(np.isnan(values) | np.isinf(values))
    return values[finite]
Return filtered feature data The features are filtered according to the user-defined filters, using the information in `ds._filter`. In addition, all `nan` and `inf` values are purged. Parameters ---------- ds: dclab.rtdc_dataset.RTDCBase The dataset containing the feature feat: str The name of the feature; must be a scalar feature
def error(self, instance, value, error_class=None, extra=''):
    """Generates a ValueError on setting property to an invalid value"""
    error_class = error_class or ValidationError
    if not isinstance(value, (list, tuple, np.ndarray)):
        # Non-array values are delegated to the base implementation
        super(Array, self).error(instance, value, error_class, extra)
    # Describe what was actually supplied
    if isinstance(value, (list, tuple)):
        val_description = f'A {value.__class__.__name__} of length {len(value)}'
    else:
        val_description = f'An array of shape {value.shape} and dtype {value.dtype}'
    # Describe where the failure happened
    if instance is None:
        prefix = f'{self.__class__.__name__} property'
    else:
        prefix = f"The '{self.name}' property of a {instance.__class__.__name__} instance"
    message = f'{prefix} must be {self.info}. {val_description} was specified. {extra}'
    if issubclass(error_class, ValidationError):
        raise error_class(message, 'invalid', self.name, instance)
    raise error_class(message)
Generates a ValueError on setting property to an invalid value
def connectToBroker(self, protocol):
    '''
    Connect to MQTT broker, wire up callbacks and subscribe.
    '''
    self.protocol = protocol
    # route publishes and disconnects back to this object
    protocol.onPublish = self.onPublish
    protocol.onDisconnection = self.onDisconnection
    protocol.setWindowSize(3)
    try:
        yield protocol.connect("TwistedMQTT-subs", keepalive=60)
        yield self.subscribe()
    except Exception as e:
        log.error("Connecting to {broker} raised {excp!s}",
                  broker=BROKER, excp=e)
    else:
        log.info("Connected and subscribed to {broker}", broker=BROKER)
Connect to MQTT broker
def convertColors(element):
    """
    Recursively converts all color properties into #RRGGBB format if shorter

    Returns the number of bytes saved by the conversions.
    """
    numBytes = 0

    if element.nodeType != Node.ELEMENT_NODE:
        return 0

    # set up list of color attributes for each element type
    attrsToConvert = []
    if element.nodeName in ['rect', 'circle', 'ellipse', 'polygon',
                            'line', 'polyline', 'path', 'g', 'a']:
        attrsToConvert = ['fill', 'stroke']
    elif element.nodeName in ['stop']:
        attrsToConvert = ['stop-color']
    elif element.nodeName in ['solidColor']:
        attrsToConvert = ['solid-color']

    # now convert all the color formats
    styles = _getStyle(element)
    for attr in attrsToConvert:
        oldColorValue = element.getAttribute(attr)
        if oldColorValue != '':
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            if oldBytes > newBytes:
                element.setAttribute(attr, newColorValue)
                numBytes += (oldBytes - len(element.getAttribute(attr)))
        # colors might also hide in styles
        if attr in styles:
            oldColorValue = styles[attr]
            newColorValue = convertColor(oldColorValue)
            oldBytes = len(oldColorValue)
            newBytes = len(newColorValue)
            if oldBytes > newBytes:
                styles[attr] = newColorValue
                # BUG FIX: savings must be measured against the new style
                # value; the old code read the (unrelated, often empty)
                # attribute value and over-counted the bytes saved.
                numBytes += (oldBytes - newBytes)
    _setStyle(element, styles)

    # now recurse for our child elements
    for child in element.childNodes:
        numBytes += convertColors(child)

    return numBytes
Recursively converts all color properties into #RRGGBB format if shorter
def insert(parent: ScheduleComponent, time: int, child: ScheduleComponent,
           name: str = None) -> Schedule:
    """Return a new schedule with the `child` schedule inserted into the `parent` at `start_time`.

    Args:
        parent: Schedule to be inserted into
        time: Time to be inserted defined with respect to `parent`
        child: Schedule to insert
        name: Name of the new schedule. Defaults to name of parent
    """
    # Implemented as a union of ``parent`` with ``child`` shifted to ``time``.
    return union(parent, (time, child), name=name)
Return a new schedule with the `child` schedule inserted into the `parent` at `start_time`. Args: parent: Schedule to be inserted into time: Time to be inserted defined with respect to `parent` child: Schedule to insert name: Name of the new schedule. Defaults to name of parent
def provideCustomerReferralCode(sender, **kwargs):
    '''
    If the vouchers app is installed and referrals are enabled, then the
    customer's profile page can show their voucher referral code.
    '''
    customer = kwargs.pop('customer')

    # Guard clauses keep the short-circuit: the referral constant is only
    # consulted when vouchers are enabled.
    if not getConstant('vouchers__enableVouchers'):
        return
    if not getConstant('referrals__enableReferralProgram'):
        return

    vrd = ensureReferralVouchersExist(customer)
    return {'referralVoucherId': vrd.referreeVoucher.voucherId}
If the vouchers app is installed and referrals are enabled, then the customer's profile page can show their voucher referral code.
def _update_statuses(self, sub_job_num=None):
    """
    Update statuses of jobs nodes in workflow.

    Returns a dict mapping each known HTCondor status name to the number
    of workflow nodes currently in that state.
    """
    # NOTE(review): ``sub_job_num`` is accepted but unused here.
    counts = {status: 0 for status in CONDOR_JOB_STATUSES.values()}
    for node in self.node_set:
        job = node.job
        try:
            counts[job.status] += 1
        except (KeyError, HTCondorError):
            # unknown status or job query failure
            counts['Unexpanded'] += 1
    return counts
Update statuses of jobs nodes in workflow.
def runserver(ctx, conf, port, foreground):
    """Run the fnExchange server"""
    config = read_config(conf)
    conf_dict = config['conf']

    debug = conf_dict.get('debug', False)
    click.echo('Debug mode {0}.'.format('on' if debug else 'off'))

    # CLI port wins; otherwise fall back to the configured one
    port = port or conf_dict['server']['port']
    app_settings = {
        'debug': debug,
        'auto_reload': conf_dict['server'].get('auto_reload', False),
    }
    handlers_settings = __create_handler_settings(config)

    if foreground:
        click.echo('Requested mode: foreground')
        start_app(port, app_settings, handlers_settings)
    else:
        click.echo('Requested mode: background')
        raise NotImplementedError
Run the fnExchange server
def p_const_vector_vector_list(p):
    """ const_vector_list : const_vector_list COMMA const_vector
    """
    # The docstring above is the PLY grammar rule — it must not change.
    if len(p[3]) == len(p[1][0]):
        p[0] = p[1] + [p[3]]
    else:
        syntax_error(p.lineno(2), 'All rows must have the same number of elements')
        p[0] = None
const_vector_list : const_vector_list COMMA const_vector
def string_to_int(s):
    """Convert a string of bytes into an integer, as per X9.62.

    Accepts ``bytes`` (iterating yields ints) or ``str`` (chars are
    converted with ``ord``); big-endian interpretation.
    """
    result = 0
    for ch in s:
        byte = ch if isinstance(ch, int) else ord(ch)
        result = (result << 8) + byte
    return result
Convert a string of bytes into an integer, as per X9.62.
def validate(self, value, model=None, context=None):
    """ Perform validation """
    from boiler.user.services import user_service

    # id of the record being edited (dict or entity), if any
    self_id = None
    if model:
        self_id = model.get('id') if isinstance(model, dict) else getattr(model, 'id')

    found = user_service.first(**{self.property: value})
    # valid when nothing matches, or the match is the record itself
    if not found or (model and self_id == found.id):
        return Error()
    return Error(self.error)
Perform validation
def _execute(self, query, commit=False, working_columns=None):
    """
    Execute a query with provided parameters

    Parameters
    :query: SQL string with parameter placeholders
    :commit: If True, the query will commit
    :working_columns: Optional column list overriding ``self.columns``
    :returns: List of RawlResult rows
    """
    log.debug("RawlBase._execute()")

    result = []

    if working_columns is None:
        working_columns = self.columns

    with RawlConnection(self.dsn) as conn:
        # random id used only to correlate log lines for this query
        query_id = random.randrange(9999)
        curs = conn.cursor()
        try:
            log.debug("Executing(%s): %s" % (query_id, query.as_string(curs)))
        except Exception:
            # as_string() can fail for some query types; logging must never
            # break execution (was a bare ``except:``).
            log.exception("LOGGING EXCEPTION LOL")
        curs.execute(query)
        log.debug("Executed")

        if commit:  # was ``commit == True``
            log.debug("COMMIT(%s)" % query_id)
            conn.commit()

        log.debug("curs.rowcount: %s" % curs.rowcount)
        if curs.rowcount > 0:
            # Process each row into a dict keyed by the working columns and
            # wrap it in a RawlResult.
            for row in curs.fetchall():
                row_dict = {}
                for i, col in enumerate(working_columns):
                    # For aliased columns, we need to get rid of the dot
                    col = col.replace('.', '_')
                    try:
                        row_dict[col] = row[i]
                    except IndexError:
                        # fewer result columns than working columns
                        pass
                log.debug("Appending dict to result: %s" % row_dict)
                result.append(RawlResult(working_columns, row_dict))
        curs.close()
    return result
Execute a query with provided parameters Parameters :query: SQL string with parameter placeholders :commit: If True, the query will commit :returns: List of rows
def configure_logging(info=False, debug=False):
    """Configure logging

    The function configures log messages. By default, log messages
    are sent to stderr. Set the parameter `debug` to activate the
    debug mode.

    :param info: set verbose (INFO) mode
    :param debug: set the debug mode
    """
    if info:
        level, fmt = logging.INFO, LOG_FORMAT
    elif debug:
        level, fmt = logging.DEBUG, DEBUG_LOG_FORMAT
    else:
        level, fmt = logging.WARNING, LOG_FORMAT
    logging.basicConfig(level=level, format=fmt)

    if not debug:
        # Quiet noisy third-party loggers unless debugging.
        # BUG FIX: the logger name was misspelled 'urrlib3', which silenced
        # a non-existent logger and left urllib3 chatty.
        for name in ('requests', 'urllib3', 'elasticsearch'):
            logging.getLogger(name).setLevel(logging.WARNING)
Configure logging The function configures log messages. By default, log messages are sent to stderr. Set the parameter `debug` to activate the debug mode. :param debug: set the debug mode
def cwd(self, new_path):
    '''Sets the cwd during reads and writes; returns the previous value.'''
    previous, self._cwd = self._cwd, new_path
    return previous
Sets the cwd during reads and writes
def freeSave(self, obj):
    """THIS IS WHERE COMMITS TAKE PLACE! Ends a saving session, only the
    initiator can end a session. The commit is performed at the end of
    the session"""
    # Only the session initiator may end it, and never mid-transaction.
    if self.saveIniator is not obj or self.inTransaction:
        return False
    self.saveIniator = None
    self.savedObject = set()
    self.connection.commit()
    return True
THIS IS WHERE COMMITS TAKE PLACE! Ends a saving session, only the initiator can end a session. The commit is performed at the end of the session
def check_params(**kwargs):
    """check_params: check whether some parameters are missing

    A parameter counts as missing when its value is None. Prints a
    warning listing the missing names and returns False; otherwise
    returns True.
    """
    missing = [name for name, value in kwargs.items() if value is None]
    if not missing:
        return True
    print("POT - Warning: following necessary parameters are missing")
    for name in missing:
        print("\n", name)
    return False
check_params: check whether some parameters are missing
def _hjoin_multiline(join_char, strings): """Horizontal join of multiline strings """ cstrings = [string.split("\n") for string in strings] max_num_lines = max(len(item) for item in cstrings) pp = [] for k in range(max_num_lines): p = [cstring[k] for cstring in cstrings] pp.append(join_char + join_char.join(p) + join_char) return "\n".join([p.rstrip() for p in pp])
Horizontal join of multiline strings
def add_int(self, name, min, max, warp=None):
    """An integer-valued dimension bounded between `min` <= x <= `max`.
    Note that the right endpoint of the interval includes `max`.

    When `warp` is None, the base measure associated with this dimension
    is a categorical distribution with each weight on each of the integers
    in [min, max]. With `warp == 'log'`, the base measure is a uniform
    distribution on the log of the variable, with bounds at `log(min)` and
    `log(max)`. This is appropriate for variables that are "naturally" in
    log-space. Other `warp` functions are not supported (yet), but may be
    at a later time. Please note that this functionality is not supported
    for `hyperopt_tpe`.

    Raises ValueError for max < min, an unsupported warp, or non-positive
    min with log warping.
    """
    min, max = map(int, (min, max))
    if max < min:
        raise ValueError('variable %s: max < min error' % name)
    if warp not in (None, 'log'):
        raise ValueError('variable %s: warp=%s is not supported. use '
                         'None or "log",' % (name, warp))
    if min <= 0 and warp == 'log':
        # BUG FIX: the '%s' placeholder was never filled in, so the error
        # message did not name the offending variable.
        raise ValueError('variable %s: log-warping requires min > 0' % name)
    self.variables[name] = IntVariable(name, min, max, warp)
An integer-valued dimension bounded between `min` <= x <= `max`. Note that the right endpoint of the interval includes `max`. When `warp` is None, the base measure associated with this dimension is a categorical distribution with each weight on each of the integers in [min, max]. With `warp == 'log'`, the base measure is a uniform distribution on the log of the variable, with bounds at `log(min)` and `log(max)`. This is appropriate for variables that are "naturally" in log-space. Other `warp` functions are not supported (yet), but may be at a later time. Please note that this functionality is not supported for `hyperopt_tpe`.
def get_authenticated_user(self, redirect_uri, callback, scope=None, **args):
    """Fetch the authenticated Renren user via the OAuth2 code flow and
    invoke ``callback`` with the user dict (or ``None`` on failure).

    Usage::

        class RenrenHandler(tornado.web.RequestHandler, RenrenGraphMixin):
            @tornado.web.asynchronous
            @gen.engine
            def get(self):
                self.get_authenticated_user(
                    callback=(yield gen.Callback('key')),
                    redirect_uri=url)
                user = yield gen.Wait('key')
                if not user:
                    raise web.HTTPError(500, "Renren auth failed")
                # do something else
                self.finish()
    """
    code = self.get_argument('code', None)
    if not code:
        # No authorization code yet: send the user to authorize first.
        self.authorize_redirect(redirect_uri, scope=scope, **args)
        return
    # Exchange the authorization code for an access token.
    self.get_access_token(
        code,
        callback=(yield gen.Callback('_RenrenGraphMixin.get_authenticated_user')),
        redirect_uri=redirect_uri)
    response = yield gen.Wait('_RenrenGraphMixin.get_authenticated_user')
    if not response:
        callback(None)
        return
    try:
        user = json_decode(response.body)
    except:
        logging.warning("Error response %s fetching %s", response.body, response.request.url)
        callback(None)
        return
    if 'error' in user:
        # OAuth error payload instead of a user record
        logging.warning("Error response %s fetching %s", user['error_description'], response.request.url)
        callback(None)
        return
    #{{{ get session key
    self.renren_request('renren_api/session_key', user['access_token'],
                        callback=(yield gen.Callback('_RenrenGraphMixin._session_key')))
    response = yield gen.Wait('_RenrenGraphMixin._session_key')
    if response.error and not response.body:
        logging.warning("Error response %s fetching %s", response.error, response.request.url)
    elif response.error:
        logging.warning("Error response %s fetching %s: %s", response.error,
                        response.request.url, response.body)
    else:
        try:
            # session key is best-effort: failures leave 'session' unset
            user['session'] = json_decode(response.body)
        except:
            pass
    #}}}
    #TODO delete when renren graph api released
    callback(user)
    return
class RenrenHandler(tornado.web.RequestHandler, RenrenGraphMixin): @tornado.web.asynchronous @gen.engine def get(self): self.get_authenticated_user( callback=(yield gen.Callback('key')), redirect_uri=url) user = yield gen.Wait('key') if not user: raise web.HTTPError(500, "Renren auth failed") # do something else self.finish()
async def cancel_task(app: web.Application,
                      task: asyncio.Task,
                      *args,
                      **kwargs
                      ) -> Any:
    """
    Convenience function for calling `TaskScheduler.cancel(task)`

    This will use the default `TaskScheduler` to cancel the given task.
    Extra positional and keyword arguments are forwarded to `cancel()`.

    Example:

        import asyncio
        from datetime import datetime
        from brewblox_service import scheduler, service

        async def current_time(interval):
            while True:
                await asyncio.sleep(interval)
                print(datetime.now())

        async def stop_after(app, task, duration):
            await asyncio.sleep(duration)
            await scheduler.cancel_task(app, task)
            print('stopped!')

        async def start(app):
            # Start first task
            task = await scheduler.create_task(app, current_time(interval=2))
            # Start second task to stop the first
            await scheduler.create_task(app, stop_after(app, task, duration=10))

        app = service.create_app(default_name='example')
        scheduler.setup(app)
        app.on_startup.append(start)
        service.furnish(app)
        service.run(app)
    """
    return await get_scheduler(app).cancel(task, *args, **kwargs)
Convenience function for calling `TaskScheduler.cancel(task)` This will use the default `TaskScheduler` to cancel the given task. Example: import asyncio from datetime import datetime from brewblox_service import scheduler, service async def current_time(interval): while True: await asyncio.sleep(interval) print(datetime.now()) async def stop_after(app, task, duration): await asyncio.sleep(duration) await scheduler.cancel_task(app, task) print('stopped!') async def start(app): # Start first task task = await scheduler.create_task(app, current_time(interval=2)) # Start second task to stop the first await scheduler.create_task(app, stop_after(app, task, duration=10)) app = service.create_app(default_name='example') scheduler.setup(app) app.on_startup.append(start) service.furnish(app) service.run(app)
def _organize_variants(samples, batch_id): """Retrieve variant calls for all samples, merging batched samples into single VCF. """ caller_names = [x["variantcaller"] for x in samples[0]["variants"]] calls = collections.defaultdict(list) for data in samples: for vrn in data["variants"]: calls[vrn["variantcaller"]].append(vrn["vrn_file"]) data = samples[0] vrn_files = [] for caller in caller_names: fnames = calls[caller] if len(fnames) == 1: vrn_files.append(fnames[0]) else: vrn_files.append(population.get_multisample_vcf(fnames, batch_id, caller, data)) return caller_names, vrn_files
Retrieve variant calls for all samples, merging batched samples into single VCF.
def clone(self, new_object):
    """
    Returns an object that re-binds the underlying "method" to the
    specified new object.
    """
    cls = self.__class__
    return cls(new_object, self.method, self.name)
Returns an object that re-binds the underlying "method" to the specified new object.
def response(self, model=None, code=HTTPStatus.OK, description=None, **kwargs):
    """
    Endpoint response OpenAPI documentation decorator.

    It automatically documents HTTPError%(code)d responses with relevant
    schemas.

    Arguments:
        model (flask_marshmallow.Schema) - it can be a class or an instance
            of the class, which will be used for OpenAPI documentation
            purposes. It can be omitted if ``code`` argument is set to an
            error HTTP status code.
        code (int) - HTTP status code which is documented.
        description (str)

    Example:
    >>> @namespace.response(BaseTeamSchema(many=True))
    ... @namespace.response(code=HTTPStatus.FORBIDDEN)
    ... def get_teams():
    ...     if not user.is_admin:
    ...         abort(HTTPStatus.FORBIDDEN)
    ...     return Team.query.all()
    """
    code = HTTPStatus(code)
    if code is HTTPStatus.NO_CONTENT:
        # 204 responses carry no body, so a model makes no sense
        assert model is None
    if model is None and code not in {HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT}:
        if code.value not in http_exceptions.default_exceptions:
            raise ValueError("`model` parameter is required for code %d" % code)
        # Error codes fall back to a generic HTTP error schema
        model = self.model(
            name='HTTPError%d' % code,
            model=DefaultHTTPErrorSchema(http_code=code)
        )
    if description is None:
        description = code.description

    def response_serializer_decorator(func):
        """
        This decorator handles responses to serialize the returned value
        with a given model.
        """
        def dump_wrapper(*args, **kwargs):
            # pylint: disable=missing-docstring
            response = func(*args, **kwargs)
            extra_headers = None

            if response is None:
                if model is not None:
                    raise ValueError("Response cannot not be None with HTTP status %d" % code)
                return flask.Response(status=code)
            elif isinstance(response, flask.Response) or model is None:
                # already a full response, or nothing to serialize with
                return response
            elif isinstance(response, tuple):
                response, _code, extra_headers = unpack(response)
            else:
                _code = code

            if HTTPStatus(_code) is code:
                # only serialize when the returned code matches the
                # documented one
                response = model.dump(response).data
            return response, _code, extra_headers

        return dump_wrapper

    def decorator(func_or_class):
        if code.value in http_exceptions.default_exceptions:
            # If the code is handled by raising an exception, it will
            # produce a response later, so we don't need to apply a useless
            # wrapper.
            decorated_func_or_class = func_or_class
        elif isinstance(func_or_class, type):
            # Handle Resource classes decoration
            # pylint: disable=protected-access
            func_or_class._apply_decorator_to_methods(response_serializer_decorator)
            decorated_func_or_class = func_or_class
        else:
            decorated_func_or_class = wraps(func_or_class)(
                response_serializer_decorator(func_or_class)
            )

        if model is None:
            api_model = None
        else:
            if isinstance(model, Model):
                api_model = model
            else:
                api_model = self.model(model=model)
            if getattr(model, 'many', False):
                # list-valued responses are documented as arrays
                api_model = [api_model]

        doc_decorator = self.doc(
            responses={
                code.value: (description, api_model)
            }
        )
        return doc_decorator(decorated_func_or_class)

    return decorator
Endpoint response OpenAPI documentation decorator. It automatically documents HTTPError%(code)d responses with relevant schemas. Arguments: model (flask_marshmallow.Schema) - it can be a class or an instance of the class, which will be used for OpenAPI documentation purposes. It can be omitted if ``code`` argument is set to an error HTTP status code. code (int) - HTTP status code which is documented. description (str) Example: >>> @namespace.response(BaseTeamSchema(many=True)) ... @namespace.response(code=HTTPStatus.FORBIDDEN) ... def get_teams(): ... if not user.is_admin: ... abort(HTTPStatus.FORBIDDEN) ... return Team.query.all()
def get_template_sources(self, template_name, template_dirs=None):
    """
    Returns the absolute paths to "template_name", when appended to each
    directory in "template_dirs". Any paths that don't lie inside one of
    the template dirs are excluded from the result set, for security
    reasons.
    """
    dirs = template_dirs or self.get_dirs()
    for template_dir in dirs:
        try:
            name = safe_join(template_dir, template_name)
        except SuspiciousFileOperation:
            # The joined path was located outside of this template_dir
            # (it might be inside another one, so this isn't fatal).
            continue
        if Origin:
            yield Origin(name=name, template_name=template_name, loader=self)
        else:
            yield name
Returns the absolute paths to "template_name", when appended to each directory in "template_dirs". Any paths that don't lie inside one of the template dirs are excluded from the result set, for security reasons.
def doc(self):
    """
    Formats a table of documentation strings to help users remember
    variable names, and understand how they are translated into
    python safe names.

    Returns
    -------
    docs_df: pandas dataframe
        Dataframe with columns for the model components:
            - Real names
            - Python safe identifiers (as used in model.components)
            - Units string
            - Documentation strings from the original model file
    """
    collector = []
    for name, varname in self.components._namespace.items():
        try:
            # Component docstrings are generated with a fixed layout:
            # line 2 holds the equation, 3 the units, 4 the limits,
            # 5 the type, and lines 7+ the free-form comment.
            docstring = getattr(self.components, varname).__doc__
            lines = docstring.split('\n')
            collector.append({'Real Name': name,
                              'Py Name': varname,
                              'Eqn': lines[2].replace("Original Eqn:", "").strip(),
                              'Unit': lines[3].replace("Units:", "").strip(),
                              'Lims': lines[4].replace("Limits:", "").strip(),
                              'Type': lines[5].replace("Type:", "").strip(),
                              'Comment': '\n'.join(lines[7:]).strip()})
        except Exception:
            # Skip components whose docstring does not follow the expected
            # layout (best effort). Was a bare ``except:``, which also
            # swallowed KeyboardInterrupt/SystemExit.
            pass

    docs_df = _pd.DataFrame(collector)
    docs_df.fillna('None', inplace=True)

    order = ['Real Name', 'Py Name', 'Unit', 'Lims', 'Type', 'Eqn', 'Comment']
    return docs_df[order].sort_values(by='Real Name').reset_index(drop=True)
Formats a table of documentation strings to help users remember variable names, and understand how they are translated into python safe names. Returns ------- docs_df: pandas dataframe Dataframe with columns for the model components: - Real names - Python safe identifiers (as used in model.components) - Units string - Documentation strings from the original model file
def statuses_show(self, id, trim_user=None, include_my_retweet=None,
                  include_entities=None):
    """
    Returns a single Tweet, specified by the id parameter.

    https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid

    :param str id: (*required*) The numerical ID of the desired tweet.
    :param bool trim_user: When set to ``True``, the tweet's user object
        includes only the status author's numerical ID.
    :param bool include_my_retweet: When set to ``True``, any Tweet
        returned that has been retweeted by the authenticating user will
        include an additional ``current_user_retweet`` node, containing
        the ID of the source status for the retweet.
    :param bool include_entities: When set to ``False``, the ``entities``
        node will not be included.

    :returns: A tweet dict.
    """
    params = {'id': id}
    optional_flags = (
        ('trim_user', trim_user),
        ('include_my_retweet', include_my_retweet),
        ('include_entities', include_entities),
    )
    for flag_name, flag_value in optional_flags:
        set_bool_param(params, flag_name, flag_value)
    return self._get_api('statuses/show.json', params)
Returns a single Tweet, specified by the id parameter. https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid :param str id: (*required*) The numerical ID of the desired tweet. :param bool trim_user: When set to ``True``, the tweet's user object includes only the status author's numerical ID. :param bool include_my_retweet: When set to ``True``, any Tweet returned that has been retweeted by the authenticating user will include an additional ``current_user_retweet`` node, containing the ID of the source status for the retweet. :param bool include_entities: When set to ``False``, the ``entities`` node will not be included. :returns: A tweet dict.
def get_files(self):
    """
    :calls: `GET /repos/:owner/:repo/pulls/:number/files <http://developer.github.com/v3/pulls>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.File.File`
    """
    files_url = self.url + "/files"
    # Lazily paginated list of the files changed by this pull request.
    return github.PaginatedList.PaginatedList(
        github.File.File,
        self._requester,
        files_url,
        None,
    )
:calls: `GET /repos/:owner/:repo/pulls/:number/files <http://developer.github.com/v3/pulls>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.File.File`
def create_ellipse(self, xcen, ycen, a, b, ang, resolution=40.0):
    """Plot an ellipse at (xcen, ycen) with semi-axes a, b and
    orientation ang (radians), drawn as a red polyline.

    :param resolution: number of sample points per half of the ellipse
    """
    import math
    upper = []
    lower = []
    # Rotate so `ang` is measured relative to the canvas orientation
    # used by p2c (same -90 degree shift as the original).
    ang = ang - math.radians(90)
    # Hoist loop-invariant trig out of the sampling loop.
    cos_a = math.cos(ang)
    sin_a = math.sin(ang)
    steps = int(resolution)
    for i in range(0, steps + 1):
        x = -1 * a + 2 * a * float(i) / resolution
        y = 1 - (x / a) ** 2
        if y < 1E-6:
            # Clamp tiny/negative values from floating-point error
            # before taking the square root.
            y = 1E-6
        y = math.sqrt(y) * b
        upper.append(self.p2c((x * cos_a + y * sin_a + xcen,
                               y * cos_a - x * sin_a + ycen)))
        y = -1 * y
        lower.append(self.p2c((x * cos_a + y * sin_a + xcen,
                               y * cos_a - x * sin_a + ycen)))
    # Walk the lower half backwards so the polyline closes smoothly.
    lower.reverse()
    upper.extend(lower)
    self.create_line(upper, fill='red', width=1)
Plot ellipse at x,y with size a,b and orientation ang
def decryptWithSessionRecord(self, sessionRecord, cipherText):
    """
    Decrypt a message, trying the current session state first and then
    each archived previous state in order.

    On success with a previous state, that state is promoted to be the
    current one.  If no state can decrypt the message, an
    InvalidMessageException carrying all collected failures is raised.

    :type sessionRecord: SessionRecord
    :type cipherText: WhisperMessage
    """
    previousStates = sessionRecord.getPreviousSessionStates()
    exceptions = []
    try:
        # Work on a copy so the record is only mutated on success.
        sessionState = SessionState(sessionRecord.getSessionState())
        plaintext = self.decryptWithSessionState(sessionState, cipherText)
        sessionRecord.setState(sessionState)
        return plaintext
    except InvalidMessageException as e:
        exceptions.append(e)

    for i in range(0, len(previousStates)):
        previousState = previousStates[i]
        try:
            promotedState = SessionState(previousState)
            plaintext = self.decryptWithSessionState(promotedState, cipherText)
            # Success: drop the old entry and make this state current.
            # (pop-then-return: the index shift from pop is harmless
            # because we leave the loop immediately.)
            previousStates.pop(i)
            sessionRecord.promoteState(promotedState)
            return plaintext
        except InvalidMessageException as e:
            exceptions.append(e)

    raise InvalidMessageException("No valid sessions", exceptions)
:type sessionRecord: SessionRecord :type cipherText: WhisperMessage
def deploy_image(self, image_name, oc_new_app_args=None, project=None, name=None):
    """
    Deploy image in OpenShift cluster using 'oc new-app'

    :param image_name: image name with tag
    :param oc_new_app_args: additional parameters for the `oc new-app`,
        env variables etc.
    :param project: project where app should be created,
        default: current project
    :param name: str, name of application, if None random name is generated
    :return: str, name of the app
    """
    # Resolve the target project up front; fall back to the currently
    # selected project when none is given.
    self.project = project or self.get_current_project()
    # app name is generated randomly
    name = name or 'app-{random_string}'.format(random_string=random_str(5))
    oc_new_app_args = oc_new_app_args or []
    new_image = self.import_image(image_name.split('/')[-1], image_name)
    # BUG FIX: the original passed the raw `project` argument to `-n`
    # and to the log call, which is None whenever the caller relied on
    # the default; use the resolved self.project instead.
    c = self._oc_command(
        ["new-app"] + oc_new_app_args + [new_image]
        + ["-n", self.project] + ["--name=%s" % name])
    logger.info("Creating new app in project %s", self.project)
    try:
        run_cmd(c)
    except subprocess.CalledProcessError as ex:
        raise ConuException("oc new-app failed: %s" % ex)
    return name
Deploy image in OpenShift cluster using 'oc new-app' :param image_name: image name with tag :param oc_new_app_args: additional parameters for the `oc new-app`, env variables etc. :param project: project where app should be created, default: current project :param name:str, name of application, if None random name is generated :return: str, name of the app
def create_or_bind_with_claims(self, source_identity):
    """CreateOrBindWithClaims.
    [Preview API]

    :param :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>` source_identity:
    :rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
    """
    serialized_body = self._serialize.body(source_identity, 'Identity')
    raw_response = self._send(
        http_method='PUT',
        location_id='90ddfe71-171c-446c-bf3b-b597cd562afd',
        version='5.0-preview.1',
        content=serialized_body)
    return self._deserialize('Identity', raw_response)
CreateOrBindWithClaims. [Preview API] :param :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>` source_identity: :rtype: :class:`<Identity> <azure.devops.v5_0.identity.models.Identity>`
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  # Pass 1: scan every (elided) line for uses of STL entities and record
  # which header each use requires.
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = include_dict.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
Reports for missing stl includes. This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the <functional>. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection.
def fit(self, x, y, **kwargs):
    """Fit the naive model.

    :param x: predictors (ignored — naive models do not use them)
    :param y: outcome values; their mean becomes the model's prediction
    :return: empty dict (no fit diagnostics for a naive model)
    """
    outcome_mean = numpy.mean(y)
    self.mean = outcome_mean
    return {}
Fit a naive model :param x: Predictors to use for fitting the data (this will not be used in naive models) :param y: Outcome
def _validate(self, inst: "InstanceNode", scope: ValidationScope,
              ctype: ContentType) -> None:
    """Extend the superclass method.

    Adds two checks before delegating to the superclass:
    1. syntax scope: the instance value must belong to this node's type;
    2. semantics scope: for link (leafref-like) types with
       require-instance, the reference target must exist.
    """
    if (scope.value & ValidationScope.syntax.value and
            inst.value not in self.type):
        raise YangTypeError(inst.json_pointer(), self.type.error_tag,
                            self.type.error_message)
    if (isinstance(self.type, LinkType) and      # referential integrity
            scope.value & ValidationScope.semantics.value and
            self.type.require_instance):
        try:
            tgt = inst._deref()
        except YangsonException:
            # Dereference failure is reported below as "instance-required".
            tgt = []
        if not tgt:
            raise SemanticError(inst.json_pointer(), "instance-required")
    super()._validate(inst, scope, ctype)
Extend the superclass method.
def setup_driver(scenario):
    """Initialize config files and global status for a scenario.

    :param scenario: running scenario
    """
    if not hasattr(world, 'config_files'):
        world.config_files = ConfigFiles()
    if not world.config_files.config_directory:
        # By default the config directory is located in the terrain path.
        default_dir = DriverWrappersPool.get_default_config_directory()
        world.config_files.set_config_directory(default_dir)
    world.global_status = {'test_passed': True}
    bdd_common_before_scenario(world, scenario)
Scenario initialization :param scenario: running scenario
def PCA(x, n=False):
    """
    Principal component analysis function.

    **Args:**

    * `x` : input matrix (2d array), every row represents new sample

    **Kwargs:**

    * `n` : number of features returned (integer) - how many columns
      should the output keep

    **Returns:**

    * `new_x` : matrix with reduced size (lower number of columns)
    """
    # BUG FIX: convert/validate x *before* picking the default n — the
    # original read x.shape first, which raised AttributeError whenever
    # a plain list was passed without an explicit n.
    try:
        x = np.array(x)
    except Exception:
        raise ValueError('Impossible to convert x to a numpy array.')
    # select n if not provided (keep all but one component)
    if not n:
        n = x.shape[1] - 1
    assert type(n) == int, "Provided n is not an integer."
    assert x.shape[1] > n, "The requested n is bigger than \
    number of features in x."
    # eigen values and eigen vectors of data covariance matrix
    eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
    # sort eigen vectors according biggest eigen value
    eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
    # form output - reduced x matrix
    return eigen_order[:n].dot(x.T).T
Principal component analysis function. **Args:** * `x` : input matrix (2d array), every row represents new sample **Kwargs:** * `n` : number of features returned (integer) - how many columns should the output keep **Returns:** * `new_x` : matrix with reduced size (lower number of columns)
def get_filestats(cluster, environ, topology, container, path, role=None):
    '''
    Fetch file stats for a path inside a topology container.

    :param cluster:
    :param environ:
    :param topology:
    :param container:
    :param path:
    :param role:
    :return:
    '''
    params = {
        'cluster': cluster,
        'environ': environ,
        'topology': topology,
        'container': container,
        'path': path,
    }
    if role is not None:
        params['role'] = role
    request_url = tornado.httputil.url_concat(
        create_url(FILESTATS_URL_FMT), params)
    raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
:param cluster: :param environ: :param topology: :param container: :param path: :param role: :return:
def _set_traffic_class_mutation(self, v, load=False):
    """
    Setter method for traffic_class_mutation, mapped from YANG variable
    /qos/map/traffic_class_mutation (list)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_traffic_class_mutation is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_traffic_class_mutation() directly.

    NOTE(review): this looks like pyangbind-generated code — prefer
    regenerating from the YANG model over hand-editing the YANGDynClass
    arguments below.
    """
    # Coerce union/alternative types to the native representation first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name",traffic_class_mutation.traffic_class_mutation, yang_name="traffic-class-mutation", rest_name="traffic-class-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_mutation_map', u'cli-mode-name': u'traffic-class-mutation-$(name)'}}), is_container='list', yang_name="traffic-class-mutation", rest_name="traffic-class-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_mutation_map', u'cli-mode-name': u'traffic-class-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the canonical generated-type description so the
        # caller can see the expected YANG type.
        raise ValueError({
            'error-string': """traffic_class_mutation must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",traffic_class_mutation.traffic_class_mutation, 
yang_name="traffic-class-mutation", rest_name="traffic-class-mutation", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_mutation_map', u'cli-mode-name': u'traffic-class-mutation-$(name)'}}), is_container='list', yang_name="traffic-class-mutation", rest_name="traffic-class-mutation", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Traffic-Class-Mutation map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'traffic_class_mutation_map', u'cli-mode-name': u'traffic-class-mutation-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })
    self.__traffic_class_mutation = t
    if hasattr(self, '_set'):
        self._set()
Setter method for traffic_class_mutation, mapped from YANG variable /qos/map/traffic_class_mutation (list) If this variable is read-only (config: false) in the source YANG file, then _set_traffic_class_mutation is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_traffic_class_mutation() directly.
def create_credentials(self,
                       environment_id,
                       source_type=None,
                       credential_details=None,
                       **kwargs):
    """
    Create credentials.

    Creates a set of credentials to connect to a remote source. Created
    credentials are used in a configuration to associate a collection
    with the remote source.
    **Note:** All credentials are sent over an encrypted connection and
    encrypted at rest.

    :param str environment_id: The ID of the environment.
    :param str source_type: The source that this credentials object connects to.
        -  `box` indicates the credentials are used to connect an instance of
        Enterprise Box.
        -  `salesforce` indicates the credentials are used to connect to
        Salesforce.
        -  `sharepoint` indicates the credentials are used to connect to
        Microsoft SharePoint Online.
        -  `web_crawl` indicates the credentials are used to perform a web
        crawl.
        =  `cloud_object_storage` indicates the credentials are used to connect
        to an IBM Cloud Object Store.
    :param CredentialDetails credential_details: Object containing details of
        the stored credentials. Obtain credentials for your source from the
        administrator of the source.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and
        HTTP status code.
    :rtype: DetailedResponse
    """
    if environment_id is None:
        raise ValueError('environment_id must be provided')
    if credential_details is not None:
        # Accept either a CredentialDetails model or a plain dict.
        credential_details = self._convert_model(credential_details,
                                                 CredentialDetails)

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    # SDK analytics headers identifying this operation.
    sdk_headers = get_sdk_headers('discovery', 'V1', 'create_credentials')
    headers.update(sdk_headers)

    params = {'version': self.version}

    data = {
        'source_type': source_type,
        'credential_details': credential_details
    }

    url = '/v1/environments/{0}/credentials'.format(
        *self._encode_path_vars(environment_id))
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True)
    return response
Create credentials. Creates a set of credentials to connect to a remote source. Created credentials are used in a configuration to associate a collection with the remote source. **Note:** All credentials are sent over an encrypted connection and encrypted at rest. :param str environment_id: The ID of the environment. :param str source_type: The source that this credentials object connects to. - `box` indicates the credentials are used to connect an instance of Enterprise Box. - `salesforce` indicates the credentials are used to connect to Salesforce. - `sharepoint` indicates the credentials are used to connect to Microsoft SharePoint Online. - `web_crawl` indicates the credentials are used to perform a web crawl. = `cloud_object_storage` indicates the credentials are used to connect to an IBM Cloud Object Store. :param CredentialDetails credential_details: Object containing details of the stored credentials. Obtain credentials for your source from the administrator of the source. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
def update_status(self, card_id, code, reimburse_status):
    """
    Update the status of an invoice card.

    Reference: https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2

    :param card_id: ID of the invoice card template
    :param code: code of the invoice card
    :param reimburse_status: invoice reimbursement status
    """
    payload = {
        'card_id': card_id,
        'code': code,
        'reimburse_status': reimburse_status,
    }
    return self._post('platform/updatestatus', data=payload)
更新发票卡券的状态 详情请参考 https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param card_id: 发票卡券模板的编号 :param code: 发票卡券的编号 :param reimburse_status: 发票报销状态
def _sorted_keys(self):
    """
    Return the keys sorted by version.

    Sorting is done with :py:func:`pkg_resources.parse_version` as the
    key function; the result is memoized in ``self._cache``.
    """
    if 'sorted_keys' not in self._cache:
        self._cache['sorted_keys'] = sorted(self.keys(), key=parse_version)
    return self._cache['sorted_keys']
Return list of keys sorted by version Sorting is done based on :py:func:`pkg_resources.parse_version`
def update_entity(self, entity, if_match='*'):
    '''
    Adds an update entity operation to the batch. See
    :func:`~azure.storage.table.tableservice.TableService.update_entity`
    for more information on updates.

    The operation will not be executed until the batch is committed.

    :param entity:
        The entity to update. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: dict or :class:`~azure.storage.table.models.Entity`
    :param str if_match:
        The client may specify the ETag for the entity on the request in
        order to compare to the ETag maintained by the service for the
        purpose of optimistic concurrency. The update operation will be
        performed only if the ETag sent by the client matches the value
        maintained by the server, indicating that the entity has not been
        modified since it was retrieved by the client. To force an
        unconditional update, set If-Match to the wildcard character (*).
    '''
    request = _update_entity(entity, if_match, self._require_encryption,
                             self._key_encryption_key,
                             self._encryption_resolver)
    partition_key = entity['PartitionKey']
    row_key = entity['RowKey']
    self._add_to_batch(partition_key, row_key, request)
Adds an update entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.update_entity` for more information on updates. The operation will not be executed until the batch is committed. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*).
def uninstalled(name):
    '''
    Ensure that the named package is not installed.

    Args:
        name (str): The flatpak package.

    Returns:
        dict: The ``result`` and ``output``.

    Example:

    .. code-block:: yaml

        uninstall_package:
          flatpak.uninstalled:
            - name: gimp
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    old = __salt__['flatpak.is_installed'](name)
    if not old:
        # Nothing to do — already absent.
        ret['comment'] = 'Package {0} is not installed'.format(name)
        ret['result'] = True
        return ret

    if __opts__['test']:
        # Test mode: report what would change, result stays None.
        ret['comment'] = 'Package {0} would have been uninstalled'.format(name)
        ret['changes']['old'] = old[0]['version']
        ret['changes']['new'] = None
        ret['result'] = None
        return ret

    __salt__['flatpak.uninstall'](name)
    if not __salt__['flatpak.is_installed'](name):
        ret['comment'] = 'Package {0} uninstalled'.format(name)
        ret['changes']['old'] = old[0]['version']
        ret['changes']['new'] = None
        ret['result'] = True
        return ret

    # BUG FIX: the original fell off the end (implicitly returning None,
    # which is not a valid state return) when the package was still
    # installed after the uninstall attempt.
    ret['comment'] = 'Failed to uninstall package {0}'.format(name)
    ret['result'] = False
    return ret
Ensure that the named package is not installed. Args: name (str): The flatpak package. Returns: dict: The ``result`` and ``output``. Example: .. code-block:: yaml uninstall_package: flatpak.uninstalled: - name: gimp
def typed_node_from_id(id: str) -> TypedNode:
    """
    Get typed node from id

    :param id: id as curie
    :return: TypedNode object
    """
    # Generic/administrative types that carry no domain meaning.
    filter_out_types = [
        'cliqueLeader',
        'Class',
        'Node',
        'Individual',
        'quality',
        'sequence feature'
    ]
    node = next(get_scigraph_nodes([id]))
    # Some nodes have no label at all.
    label = node['lbl'] if 'lbl' in node else None

    types = [typ.lower() for typ in node['meta']['types']
             if typ not in filter_out_types]
    # BUG FIX: guard against every type being filtered out, which
    # previously raised IndexError on types[0].
    # NOTE(review): assumes TypedNode.type accepts None — confirm.
    node_type = types[0] if types else None

    return TypedNode(
        id=node['id'],
        label=label,
        type=node_type,
        taxon=get_taxon(id)
    )
Get typed node from id :param id: id as curie :return: TypedNode object
def clear(self):
    """
    Clears the display and resets the cursor position to ``(0, 0)``.
    """
    self._cx = 0
    self._cy = 0
    # Paint the whole drawable area with the background color.
    self._canvas.rectangle(self._device.bounding_box,
                           fill=self.default_bgcolor)
    self.flush()
Clears the display and resets the cursor position to ``(0, 0)``.
def set_attrs(self, **attrs):
    """Update model attributes (e.g. input resistance of a cell) and
    propagate them to the simulator backend."""
    self.attrs.update(attrs)
    self._backend.set_attrs(**attrs)
Set model attributes, e.g. input resistance of a cell.
def dre_dc(self, pars):
    r"""Derivative of the real part of the response with respect to
    ``c`` (Cole-Cole style parameterization, judging by the name and
    the otc/tau/ang terms — TODO confirm).

    NOTE(review): the original docstring was the placeholder
    ":math:Add formula"; the closed-form expression should be added.
    """
    self._set_parameters(pars)
    # term 1
    num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
    num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
    term1 = (num1a + num1b) / self.denom

    # term 2
    # NOTE(review): `np.sin(self.c / np.pi)` is inconsistent with the
    # `self.ang`-based angle terms used everywhere else in this block;
    # confirm against the published derivative before changing.
    num2 = self.otc * np.sin(self.c / np.pi) * 2
    denom2 = self.denom ** 2
    term2 = num2 / denom2

    # term 3
    num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
    num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
    num3c = 2 * np.log(self.w * self.tau) * self.otc2
    term3 = num3a - num3b + num3c

    result = self.sigmai * self.m * (term1 + term2 * term3)
    return result
r""" :math:Add formula
def _capitalize_word(text, pos): """Capitalize the current (or following) word.""" while pos < len(text) and not text[pos].isalnum(): pos += 1 if pos < len(text): text = text[:pos] + text[pos].upper() + text[pos + 1:] while pos < len(text) and text[pos].isalnum(): pos += 1 return text, pos
Capitalize the current (or following) word.
def get_class_from_settings(settings_key):
    """Gets a class from a setting key.

    Resolution order: loaded models first, then installed apps, then a
    full dotted-path import.

    :param settings_key: the key defined in settings to the value for
    :raise NotImplementedError: if the setting is missing or empty
    """
    cls_path = getattr(settings, settings_key, None)
    if not cls_path:
        raise NotImplementedError()

    try:
        # First check to see if it's an installed model
        return get_model_from_settings(settings_key=settings_key)
    # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        try:
            # Next, check from installed apps
            return get_class_from_settings_from_apps(settings_key=settings_key)
        except Exception:
            # Last, try to load from the full path
            return get_class_from_settings_full_path(settings_key)
Gets a class from a setting key. This will first check loaded models, then look in installed apps, then fallback to import from lib. :param settings_key: the key defined in settings to the value for
def generate_wildcard_pem_bytes():
    """
    Generate a wildcard (subject name '*') self-signed certificate valid for
    10 years.

    https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate

    :return: Bytes representation of the PEM certificate data
    """
    key = generate_private_key(u'rsa')
    # Self-signed: issuer and subject are the same wildcard name.
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u'*')])
    cert = (
        x509.CertificateBuilder()
        .issuer_name(name)
        .subject_name(name)
        # Backdated one day to tolerate clock skew between peers.
        .not_valid_before(datetime.today() - timedelta(days=1))
        # 3650 days = the advertised ~10-year validity.
        .not_valid_after(datetime.now() + timedelta(days=3650))
        # Random serial derived from a UUID.
        .serial_number(int(uuid.uuid4()))
        .public_key(key.public_key())
        .sign(
            private_key=key,
            algorithm=hashes.SHA256(),
            backend=default_backend())
    )

    # PEM private key followed by the PEM certificate, as one byte string.
    return b''.join((
        key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()),
        cert.public_bytes(serialization.Encoding.PEM)
    ))
Generate a wildcard (subject name '*') self-signed certificate valid for 10 years. https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate :return: Bytes representation of the PEM certificate data
def validate(self, schema):
    """
    Validate this VDOM against the given JSON Schema (draft 4).

    Raises ValidationError if the document does not conform.
    """
    document = self.to_dict()
    try:
        # `validate` here resolves to the module-level jsonschema
        # validator, not this method.
        validate(instance=document, schema=schema, cls=Draft4Validator)
    except ValidationError as e:
        raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, e))
Validate VDOM against given JSON Schema Raises ValidationError if schema does not match
def namespace_map(self, target):
    """Returns the namespace_map used for Thrift generation.

    :param target: The target to extract the namespace_map from.
    :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary`
    :returns: The namespaces to remap (old to new).
    :rtype: dictionary
    """
    self._check_target(target)
    if target.namespace_map:
        return target.namespace_map
    return self._default_namespace_map
Returns the namespace_map used for Thrift generation. :param target: The target to extract the namespace_map from. :type target: :class:`pants.backend.codegen.targets.java_thrift_library.JavaThriftLibrary` :returns: The namespaces to remap (old to new). :rtype: dictionary
def set_format(self, column_or_columns, formatter):
    """Set the format of one or more columns.

    ``formatter`` may be a Formatter class (it is instantiated here), a
    Formatter instance, or a plain function (wrapped in a
    FunctionFormatter).  Columns whose formatter converts values are
    rewritten in place.  Returns self, allowing chained calls.
    """
    if inspect.isclass(formatter):
        formatter = formatter()
    if callable(formatter) and not hasattr(formatter, 'format_column'):
        formatter = _formats.FunctionFormatter(formatter)
    if not hasattr(formatter, 'format_column'):
        raise Exception('Expected Formatter or function: ' + str(formatter))
    for label in self._as_labels(column_or_columns):
        if formatter.converts_values:
            # Replace the stored column with its converted values.
            self[label] = formatter.convert_column(self[label])
        self._formats[label] = formatter
    return self
Set the format of a column.
def get_category_or_404(path):
    """
    Retrieve the Category whose slug is the last non-empty segment of
    *path*, or raise Http404.
    """
    segments = [segment for segment in path.split('/') if segment]
    return get_object_or_404(Category, slug=segments[-1])
Retrieve a Category instance by a path.
def build_application(conf):
    """Do some setup and return the wsgi app.

    Normalizes conf.adapter_options to a dict, resolves conf.app (loading
    it by name if a string), registers the /_simpl version route on the
    underlying Bottle instance and optionally prints the route table.
    """
    # Normalize adapter_options: list of dicts -> one merged dict,
    # None -> {}, otherwise a shallow copy.
    if isinstance(conf.adapter_options, list):
        conf['adapter_options'] = {key: val
                                   for _dict in conf.adapter_options
                                   for key, val in _dict.items()}
    elif conf.adapter_options is None:
        conf['adapter_options'] = {}
    else:
        conf['adapter_options'] = copy.copy(conf.adapter_options)
    # get wsgi app the same way bottle does if it receives a string.
    conf['app'] = conf.app or bottle.default_app()
    if isinstance(conf.app, six.string_types):
        conf['app'] = bottle.load_app(conf.app)

    def _find_bottle_app(_app):
        """Lookup the underlying Bottle() instance."""
        # Unwrap middleware layers (each exposing `.app`) until the
        # Bottle instance is found.
        while hasattr(_app, 'app'):
            if isinstance(_app, bottle.Bottle):
                break
            _app = _app.app
        assert isinstance(_app, bottle.Bottle), 'Could not find Bottle app.'
        return _app

    bottle_app = _find_bottle_app(conf.app)
    bottle_app.route(
        path='/_simpl', method='GET', callback=_version_callback)

    def _show_routes():
        """Conditionally print the app's routes."""
        if conf.app and not conf.quiet:
            if conf.reloader and os.getenv('BOTTLE_CHILD'):
                # Reloader child process: announce and fall through to
                # print the routes.
                LOG.info("Running bottle server with reloader.")
            elif not conf.reloader:
                pass
            else:
                # Reloader *parent* process: skip printing — the child
                # will do it.
                return
            routes = fmt_routes(bottle_app)
            if routes:
                print('\n{}'.format(routes), end='\n\n')

    _show_routes()
    return conf.app
Do some setup and return the wsgi app.
def var(self, ddof=0):
    '''Calculate the variance of the timeseries values.

    Returns ``None`` for an empty timeseries.

    :parameter ddof: delta degrees of freedom; the divisor used in the
        calculation is ``N - ddof`` where ``N`` is the length of the
        timeseries. Default ``0``.

    .. math:: var = \\frac{\\sum_i^N (x - \\mu)^2}{N-ddof}
    '''
    N = len(self)
    if not N:
        return None
    v = self.values()
    total = sum(v)
    # sum(x^2) - (sum x)^2 / N  ==  N * variance
    return (sum(v * v) - total * total / N) / (N - ddof)
Calculate variance of timeseries. Return the variance of the series values (``None`` if the timeseries is empty). :parameter ddof: delta degrees of freedom, the divisor used in the calculation is given by ``N - ddof`` where ``N`` represents the length of timeseries. Default ``0``. .. math:: var = \\frac{\\sum_i^N (x - \\mu)^2}{N-ddof}
def show_in_view(self, sourceview, matches, targetname=None):
    """ Show search result in ncurses view.

    :param sourceview: the view the matches were filtered from.
    :param matches: the torrent items to show.
    :param targetname: optional target view name; falls back to
        ``--to-view`` and finally ``"rtcontrol"``.
    """
    # "append" / "remove" can come from either dedicated flags or the
    # generic --alter-view option.
    append = self.options.append_view or self.options.alter_view == 'append'
    remove = self.options.alter_view == 'remove'
    action_name = ', appending to' if append else ', removing from' if remove else ' into'
    # Push the matches into rTorrent; returns the resolved view name.
    targetname = config.engine.show(matches,
        targetname or self.options.to_view or "rtcontrol",
        append=append, disjoin=remove)
    msg = "Filtered %d out of %d torrents using [ %s ]" % (
        len(matches), sourceview.size(), sourceview.matcher)
    self.LOG.info("%s%s rTorrent view %r." % (msg, action_name, targetname))
    # Also record the summary in rTorrent's own log.
    config.engine.log(msg)
Show search result in ncurses view.
def set_provider_links(self, resource_ids=None):
    """Sets a provider chain in order from the most recent source to
    the originating source.

    :param resource_ids: the new source
    :type resource_ids: ``osid.id.Id[]``
    :raise: ``InvalidArgument`` -- ``resource_ids`` is invalid
    :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
    :raise: ``NullArgument`` -- ``resource_ids`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clauses: reject null input and read-only metadata up front.
    if resource_ids is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['provider_link_ids'])
    if metadata.is_read_only():
        raise NoAccess()
    if not self._is_valid_input(resource_ids, metadata, array=True):
        raise InvalidArgument()
    # Store the ids as strings, replacing any previous chain.
    self._my_map['providerLinkIds'] = [str(rid) for rid in resource_ids]
Sets a provider chain in order from the most recent source to the originating source. :param resource_ids: the new source :type resource_ids: ``osid.id.Id[]`` :raise: ``InvalidArgument`` -- ``resource_ids`` is invalid :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true`` :raise: ``NullArgument`` -- ``resource_ids`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def process_file(self, filename):
    """Processing one file.

    In dry-run mode the script file is read and its lines are yielded
    (trailing newlines stripped); otherwise the script is executed via
    ``process_script`` and its output lines are yielded.
    """
    dry_run = self.config.dry_run
    if not self.config.internal:
        if dry_run:
            self.logger.info("Dry run mode for script %s", filename)
        else:
            self.logger.info("Running script %s", filename)
    if dry_run:
        with open(filename) as handle:
            for raw in handle:
                yield raw[:-1] if raw.endswith('\n') else raw
    else:
        for line in self.process_script(filename):
            yield line
Processing one file.
def _on_new_data_received(self, data: bytes):
    """
    Gets called whenever we get a whole new XML element from kik's servers.
    :param data: The data received (bytes)
    """
    if data == b' ':
        # Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
        self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
        return

    xml_element = BeautifulSoup(data.decode(), features='xml')
    # Unwrap the document to its first (and only) child element, if any.
    xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element

    # choose the handler based on the XML tag name
    # NOTE(review): the "k" branch is a plain `if` while the rest form an
    # `elif` chain starting at "iq"; behavior is unaffected since a tag
    # can only match one name, but the inconsistency looks accidental.
    if xml_element.name == "k":
        self._handle_received_k_element(xml_element)
    if xml_element.name == "iq":
        self._handle_received_iq_element(xml_element)
    elif xml_element.name == "message":
        self._handle_xmpp_message(xml_element)
    elif xml_element.name == 'stc':
        self.callback.on_captcha_received(login.CaptchaElement(xml_element))
Gets called whenever we get a whole new XML element from kik's servers. :param data: The data received (bytes)
def expl_var(self, greenacre=True, N=None):
    """
    Return proportion of explained inertia (variance) for each factor.

    :param greenacre: Perform Greenacre correction (default: True)
    :param N: optionally truncate the result to the first ``N`` factors.
    """
    if greenacre:
        # Greenacre's adjusted total inertia; self.K = number of
        # variables, self.J = total number of categories, self.s =
        # singular values (assumed set up by the fit -- see class).
        greenacre_inertia = (self.K / (self.K - 1.) * (sum(self.s**4)
                             - (self.J - self.K) / self.K**2.))
        return (self._benzecri() / greenacre_inertia)[:N]
    else:
        # Without Greenacre correction, normalize either the Benzecri-
        # corrected eigenvalues or the raw squared singular values.
        E = self._benzecri() if self.cor else self.s**2
        return (E / sum(E))[:N]
Return proportion of explained inertia (variance) for each factor. :param greenacre: Perform Greenacre correction (default: True)
def get(**kwargs):
    """Safe sensor wrapper.

    Retries ``DHTReader.receive_data`` up to ``TIME_LIMIT`` times,
    swallowing ``DHTException`` between attempts. Returns the first
    successful (truthy) reading, or ``None`` if all attempts fail.
    """
    reader = DHTReader(**kwargs)
    reading = None
    attempts = 0
    while not reading and attempts < TIME_LIMIT:
        try:
            reading = reader.receive_data()
        except DHTException:
            attempts += 1
    return reading
Safe sensor wrapper
def register_plugin(manager):
    '''
    Register blueprints and actions using given plugin manager.

    Registers the player blueprint, the playable-mimetype detector, a
    stylesheet widget, per-entry link and button widgets for audio files
    and playlists, and (if the ``player_directory_play`` argument is set)
    a header button for playing whole directories.

    :param manager: plugin manager
    :type manager: browsepy.manager.PluginManager
    '''
    manager.register_blueprint(player)
    manager.register_mimetype_function(detect_playable_mimetype)

    # add style tag
    manager.register_widget(
        place='styles',
        type='stylesheet',
        endpoint='player.static',
        filename='css/browse.css'
    )

    # register link actions
    manager.register_widget(
        place='entry-link',
        type='link',
        endpoint='player.audio',
        filter=PlayableFile.detect
    )
    manager.register_widget(
        place='entry-link',
        icon='playlist',
        type='link',
        endpoint='player.playlist',
        filter=PlayListFile.detect
    )

    # register action buttons
    manager.register_widget(
        place='entry-actions',
        css='play',
        type='button',
        endpoint='player.audio',
        filter=PlayableFile.detect
    )
    manager.register_widget(
        place='entry-actions',
        css='play',
        type='button',
        endpoint='player.playlist',
        filter=PlayListFile.detect
    )

    # check argument (see `register_arguments`) before registering
    if manager.get_argument('player_directory_play'):
        # register header button
        manager.register_widget(
            place='header',
            type='button',
            endpoint='player.directory',
            text='Play directory',
            filter=PlayableDirectory.detect
        )
Register blueprints and actions using given plugin manager. :param manager: plugin manager :type manager: browsepy.manager.PluginManager
def link_contentkey_authorization_policy(access_token, ckap_id, options_id,
                                         ams_redirected_rest_endpoint):
    '''Link Media Service Content Key Authorization Policy.

    Args:
        access_token (str): A valid Azure authentication token.
        ckap_id (str): A Media Service Asset Content Key Authorization Policy ID.
        options_id (str): A Media Service Content Key Authorization Policy Options ID.
        ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.

    Returns:
        HTTP response. JSON body.
    '''
    path = '/ContentKeyAuthorizationPolicies'
    full_path = ''.join([path, "('", ckap_id, "')", "/$links/Options"])
    full_path_encoded = urllib.parse.quote(full_path, safe='')
    # NOTE(review): the POST is sent to the module-level ``ams_rest_endpoint``,
    # not to the ``ams_redirected_rest_endpoint`` parameter, which is only
    # used to build the linked Options URI below -- confirm this is intended.
    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
    uri = ''.join([ams_redirected_rest_endpoint,
                   'ContentKeyAuthorizationPolicyOptions',
                   "('", options_id, "')"])
    body = '{"uri": "' + uri + '"}'
    return do_ams_post(endpoint, full_path_encoded, body, access_token,
                       "json_only", "1.0;NetFx")
Link Media Service Content Key Authorization Policy. Args: access_token (str): A valid Azure authentication token. ckap_id (str): A Media Service Asset Content Key Authorization Policy ID. options_id (str): A Media Service Content Key Authorization Policy Options ID. ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint. Returns: HTTP response. JSON body.
def chop_into_sequences(episode_ids,
                        unroll_ids,
                        agent_indices,
                        feature_columns,
                        state_columns,
                        max_seq_len,
                        dynamic_max=True,
                        _extra_padding=0):
    """Truncate and pad experiences into fixed-length sequences.

    Arguments:
        episode_ids (list): List of episode ids for each step.
        unroll_ids (list): List of identifiers for the sample batch. This is
            used to make sure sequences are cut between sample batches.
        agent_indices (list): List of agent ids for each step. Note that this
            has to be combined with episode_ids for uniqueness.
        feature_columns (list): List of arrays containing features.
        state_columns (list): List of arrays containing LSTM state values.
        max_seq_len (int): Max length of sequences before truncation.
        dynamic_max (bool): Whether to dynamically shrink the max seq len.
            For example, if max len is 20 and the actual max seq len in the
            data is 7, it will be shrunk to 7.
        _extra_padding (int): Add extra padding to the end of sequences.

    Returns:
        f_pad (list): Padded feature columns. These will be of shape
            [NUM_SEQUENCES * MAX_SEQ_LEN, ...].
        s_init (list): Initial states for each sequence, of shape
            [NUM_SEQUENCES, ...].
        seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].

    Examples:
        >>> f_pad, s_init, seq_lens = chop_into_sequences(
                episode_ids=[1, 1, 5, 5, 5, 5],
                unroll_ids=[4, 4, 4, 4, 4, 4],
                agent_indices=[0, 0, 0, 0, 0, 0],
                feature_columns=[[4, 4, 8, 8, 8, 8],
                                 [1, 1, 0, 1, 1, 0]],
                state_columns=[[4, 5, 4, 5, 5, 5]],
                max_seq_len=3)
        >>> print(f_pad)
        [[4, 4, 0, 8, 8, 8, 8, 0, 0],
         [1, 1, 0, 0, 1, 1, 0, 0, 0]]
        >>> print(s_init)
        [[4, 4, 5]]
        >>> print(seq_lens)
        [2, 3, 1]
    """
    # Build a per-step id that changes whenever the (episode, agent, unroll)
    # combination changes; the unroll id is shifted left to keep sample
    # batches distinct from episode/agent sums.
    prev_id = None
    seq_lens = []
    seq_len = 0
    unique_ids = np.add(
        np.add(episode_ids, agent_indices),
        np.array(unroll_ids) << 32)
    for uid in unique_ids:
        # Cut a sequence on id change or when it hits max_seq_len.
        if (prev_id is not None and uid != prev_id) or \
                seq_len >= max_seq_len:
            seq_lens.append(seq_len)
            seq_len = 0
        seq_len += 1
        prev_id = uid
    if seq_len:
        seq_lens.append(seq_len)
    assert sum(seq_lens) == len(unique_ids)

    # Dynamically shrink max len as needed to optimize memory usage
    if dynamic_max:
        max_seq_len = max(seq_lens) + _extra_padding

    # Right-pad every sequence of each feature column to max_seq_len.
    feature_sequences = []
    for f in feature_columns:
        f = np.array(f)
        f_pad = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(f)[1:])
        seq_base = 0
        i = 0
        for l in seq_lens:
            for seq_offset in range(l):
                f_pad[seq_base + seq_offset] = f[i]
                i += 1
            seq_base += max_seq_len
        assert i == len(unique_ids), f
        feature_sequences.append(f_pad)

    # Keep only the state at the first step of each sequence.
    initial_states = []
    for s in state_columns:
        s = np.array(s)
        s_init = []
        i = 0
        for l in seq_lens:
            s_init.append(s[i])
            i += l
        initial_states.append(np.array(s_init))

    return feature_sequences, initial_states, np.array(seq_lens)
Truncate and pad experiences into fixed-length sequences. Arguments: episode_ids (list): List of episode ids for each step. unroll_ids (list): List of identifiers for the sample batch. This is used to make sure sequences are cut between sample batches. agent_indices (list): List of agent ids for each step. Note that this has to be combined with episode_ids for uniqueness. feature_columns (list): List of arrays containing features. state_columns (list): List of arrays containing LSTM state values. max_seq_len (int): Max length of sequences before truncation. dynamic_max (bool): Whether to dynamically shrink the max seq len. For example, if max len is 20 and the actual max seq len in the data is 7, it will be shrunk to 7. _extra_padding (int): Add extra padding to the end of sequences. Returns: f_pad (list): Padded feature columns. These will be of shape [NUM_SEQUENCES * MAX_SEQ_LEN, ...]. s_init (list): Initial states for each sequence, of shape [NUM_SEQUENCES, ...]. seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES]. Examples: >>> f_pad, s_init, seq_lens = chop_into_sequences( episode_ids=[1, 1, 5, 5, 5, 5], unroll_ids=[4, 4, 4, 4, 4, 4], agent_indices=[0, 0, 0, 0, 0, 0], feature_columns=[[4, 4, 8, 8, 8, 8], [1, 1, 0, 1, 1, 0]], state_columns=[[4, 5, 4, 5, 5, 5]], max_seq_len=3) >>> print(f_pad) [[4, 4, 0, 8, 8, 8, 8, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0, 0]] >>> print(s_init) [[4, 4, 5]] >>> print(seq_lens) [2, 3, 1]
def LookupNamespace(self, prefix):
    """Resolves a namespace prefix in the scope of the current element."""
    return libxml2mod.xmlTextReaderLookupNamespace(self._o, prefix)
Resolves a namespace prefix in the scope of the current element.
def build_data_availability(datasets_json):
    """
    Given datasets in JSON format, get the data availability from it
    if present. Only the first availability paragraph's text is returned;
    ``None`` if there is no availability data.
    """
    if 'availability' not in datasets_json or not datasets_json['availability']:
        return None
    # only expect one paragraph of text
    return datasets_json['availability'][0].get('text')
Given datasets in JSON format, get the data availability from it if present
def param_sweep(model, sequences, param_grid, n_jobs=1, verbose=0):
    """Fit a series of models over a range of parameters.

    Parameters
    ----------
    model : msmbuilder.BaseEstimator
        An *instance* of an estimator to be used
        to fit data.
    sequences : list of array-like
        List of sequences, or a single sequence. Each
        sequence should be a 1D iterable of state
        labels. Labels can be integers, strings, or
        other orderable objects.
    param_grid : dict or sklearn.grid_search.ParameterGrid
        Parameter grid to specify models to fit. See
        sklearn.grid_search.ParameterGrid for an explanation
    n_jobs : int, optional
        Number of jobs to run in parallel using joblib.Parallel
    verbose : int, optional
        Verbosity level passed to joblib.Parallel.

    Returns
    -------
    models : list
        List of models fit to the data according to param_grid

    Raises
    ------
    ValueError
        If ``param_grid`` is neither a dict nor a ParameterGrid.
    """
    if isinstance(param_grid, dict):
        param_grid = ParameterGrid(param_grid)
    elif not isinstance(param_grid, ParameterGrid):
        # Fixed typo in the error message ("ParamaterGrid").
        raise ValueError("param_grid must be a dict or ParameterGrid instance")

    # iterable with (model, sequence) as items: each grid point gets its own
    # cloned estimator so fits are independent.
    iter_args = ((clone(model).set_params(**params), sequences)
                 for params in param_grid)

    models = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_param_sweep_helper)(args) for args in iter_args)

    return models
Fit a series of models over a range of parameters. Parameters ---------- model : msmbuilder.BaseEstimator An *instance* of an estimator to be used to fit data. sequences : list of array-like List of sequences, or a single sequence. Each sequence should be a 1D iterable of state labels. Labels can be integers, strings, or other orderable objects. param_grid : dict or sklearn.grid_search.ParameterGrid Parameter grid to specify models to fit. See sklearn.grid_search.ParameterGrid for an explanation n_jobs : int, optional Number of jobs to run in parallel using joblib.Parallel Returns ------- models : list List of models fit to the data according to param_grid
def get_conn(self):
    """
    Returns a Google Cloud Storage service object.

    The client is created lazily on first access and cached on the
    instance for reuse.
    """
    if not self._conn:
        credentials = self._get_credentials()
        self._conn = storage.Client(credentials=credentials)
    return self._conn
Returns a Google Cloud Storage service object.
def _data_from_dotnotation(self, key, default=None):
    """ Returns the MongoDB data from a key using dot notation.

    Args:
        key (str): The key to the field in the workflow document. Supports MongoDB's
                   dot notation for embedded fields.
        default (object): The default value that is returned if the key
                          does not exist.

    Returns:
        object: The data for the specified key or the default value.

    Raises:
        KeyError: If ``key`` is ``None``.
    """
    if key is None:
        raise KeyError('NoneType is not a valid key!')

    doc = self._collection.find_one({"_id": ObjectId(self._workflow_id)})
    if doc is None:
        return default

    # Walk the dotted path; per the documented contract, a missing
    # intermediate or leaf field yields ``default`` instead of raising.
    for part in key.split('.'):
        try:
            doc = doc[part]
        except (KeyError, IndexError, TypeError):
            return default
    return doc
Returns the MongoDB data from a key using dot notation. Args: key (str): The key to the field in the workflow document. Supports MongoDB's dot notation for embedded fields. default (object): The default value that is returned if the key does not exist. Returns: object: The data for the specified key or the default value.
def snapshot(self, channel=0, path_file=None, timeout=None):
    """
    Args:

        channel: Values according with Amcrest API:
                 0 - regular snapshot
                 1 - motion detection snapshot
                 2 - alarm snapshot
                 If no channel param is used, default is 0

        path_file: If path_file is provided, save the snapshot
                   in the path

    Return:
        raw from http request
    """
    response = self.command(
        "snapshot.cgi?channel={0}".format(channel), timeout_cmd=timeout)
    if path_file:
        # Stream the raw HTTP body straight to disk.
        with open(path_file, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
    return response.raw
Args: channel: Values according with Amcrest API: 0 - regular snapshot 1 - motion detection snapshot 2 - alarm snapshot If no channel param is used, default is 0 path_file: If path_file is provided, save the snapshot in the path Return: raw from http request
def get_extension_rights(self):
    """GetExtensionRights.
    [Preview API]
    :rtype: :class:`<ExtensionRightsResult> <azure.devops.v5_0.licensing.models.ExtensionRightsResult>`
    """
    raw_response = self._send(
        http_method='GET',
        location_id='5f1dbe21-f748-47c7-b5fd-3770c8bc2c08',
        version='5.0-preview.1')
    return self._deserialize('ExtensionRightsResult', raw_response)
GetExtensionRights. [Preview API] :rtype: :class:`<ExtensionRightsResult> <azure.devops.v5_0.licensing.models.ExtensionRightsResult>`
def expand(self, vs=None, conj=False):
    """Return the Shannon expansion with respect to a list of variables.

    :param vs: variables to expand over; normalized via ``_expect_vars``.
    :param conj: if True, produce the conjunctive (product-of-sums) form;
        otherwise the disjunctive (sum-of-products) form.
    """
    vs = self._expect_vars(vs)
    if vs:
        # Disjunctive: OR of (restriction AND minterm); conjunctive is dual.
        outer, inner = (And, Or) if conj else (Or, And)
        terms = [inner(self.restrict(p),
                       *boolfunc.point2term(p, conj))
                 for p in boolfunc.iter_points(vs)]
        # Drop identity terms (One for AND-of-sums, Zero for OR-of-products).
        if conj:
            terms = [term for term in terms if term is not One]
        else:
            terms = [term for term in terms if term is not Zero]
        return outer(*terms, simplify=False)
    else:
        # Nothing to expand over.
        return self
Return the Shannon expansion with respect to a list of variables.
def _matches(self, url, options, general_re, domain_required_rules, rules_with_options): """ Return if ``url``/``options`` are matched by rules defined by ``general_re``, ``domain_required_rules`` and ``rules_with_options``. ``general_re`` is a compiled regex for rules without options. ``domain_required_rules`` is a {domain: [rules_which_require_it]} mapping. ``rules_with_options`` is a list of AdblockRule instances that don't require any domain, but have other options. """ if general_re and general_re.search(url): return True rules = [] if 'domain' in options and domain_required_rules: src_domain = options['domain'] for domain in _domain_variants(src_domain): if domain in domain_required_rules: rules.extend(domain_required_rules[domain]) rules.extend(rules_with_options) if self.skip_unsupported_rules: rules = [rule for rule in rules if rule.matching_supported(options)] return any(rule.match_url(url, options) for rule in rules)
Return if ``url``/``options`` are matched by rules defined by ``general_re``, ``domain_required_rules`` and ``rules_with_options``. ``general_re`` is a compiled regex for rules without options. ``domain_required_rules`` is a {domain: [rules_which_require_it]} mapping. ``rules_with_options`` is a list of AdblockRule instances that don't require any domain, but have other options.
def _checkDragDropEvent(self, ev):
    """Checks if event contains a file URL, accepts if it does, ignores
    if it doesn't"""
    mimedata = ev.mimeData()
    file_paths = []
    if mimedata.hasUrls():
        file_paths = [str(url.toLocalFile()) for url in mimedata.urls()
                      if url.toLocalFile()]
    # accept event if drag text is a file URL
    if not file_paths:
        ev.ignore()
        return None
    ev.acceptProposedAction()
    return file_paths
Checks if event contains a file URL, accepts if it does, ignores if it doesn't
def container_move(object_id, input_params=None, always_retry=False, **kwargs):
    """
    Invokes the /container-xxxx/move API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove
    """
    # Use a None sentinel instead of a shared mutable default dict.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/move' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /container-xxxx/move API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2Fmove
def append(self, item):
    """Add new item to the list.

    If needed, append will first flush existing items and clear existing
    items.

    Args:
      item: an item to add to the list.
    """
    needs_flush = self.should_flush()
    if needs_flush:
        self.flush()
    self.items.append(item)
Add new item to the list. If needed, append will first flush existing items and clear existing items. Args: item: an item to add to the list.
def get_response_example(cls, operation, response):
    """ Get example for response object by operation object

    :param Operation operation: operation object
    :param Response response: response object
    """
    # JSON-pointer-like path used for example lookups/overrides.
    path = "#/paths/'{}'/{}/responses/{}".format(
        operation.path, operation.method, response.name)
    kwargs = dict(paths=[path])

    if response.type in PRIMITIVE_TYPES:
        # Primitive responses get a direct example value.
        result = cls.get_example_value_for_primitive_type(
            response.type, response.properties, response.type_format, **kwargs)
    else:
        # Complex responses are resolved through the schema registry.
        schema = SchemaObjects.get(response.type)
        result = cls.get_example_by_schema(schema, **kwargs)

    return result
Get example for response object by operation object :param Operation operation: operation object :param Response response: response object
def _run(self, bundle,
         container_id=None,
         empty_process=False,
         log_path=None,
         pid_file=None,
         sync_socket=None,
         command="run",
         log_format="kubernetes"):
    '''_run is the base function for run and create, the only difference
    between the two being that run does not have an option for sync_socket.

    Equivalent command line example:
      singularity oci create [create options...] <container_ID>

    Parameters
    ==========
    bundle: the full path to the bundle folder
    container_id: an optional container_id. If not provided, use same
                  container_id used to generate OciImage instance
    empty_process: run container without executing container process (for
                   example, for a pod container waiting for signals). This
                   is a specific use case for tools like Kubernetes
    log_path: the path to store the log.
    pid_file: specify the pid file path to use
    sync_socket: the path to the unix socket for state synchronization.
    command: the command (run or create) to use (default is run)
    log_format: defaults to kubernetes. Can also be "basic" or "json"
    '''
    container_id = self.get_container_id(container_id)

    # singularity oci run / create
    cmd = self._init_command(command)

    # Check that the bundle exists
    if not os.path.exists(bundle):
        bot.exit('Bundle not found at %s' % bundle)

    # Add the bundle
    cmd = cmd + ['--bundle', bundle]

    # Additional Logging Files (use identity comparison with None, PEP 8)
    cmd = cmd + ['--log-format', log_format]
    if log_path is not None:
        cmd = cmd + ['--log-path', log_path]
    if pid_file is not None:
        cmd = cmd + ['--pid-file', pid_file]
    if sync_socket is not None:
        cmd = cmd + ['--sync-socket', sync_socket]
    if empty_process:
        cmd.append('--empty-process')

    # Finally, add the container_id
    cmd.append(container_id)

    # Generate the instance; the raw command result is intentionally
    # discarded -- the state query below reports the outcome.
    self._send_command(cmd, sudo=True)

    # Get the status to report to the user!
    # TODO: Singularity seems to create even with error, can we check and
    # delete for the user if this happens?
    return self.state(container_id, sudo=True, sync_socket=sync_socket)
_run is the base function for run and create, the only difference between the two being that run does not have an option for sync_socket. Equivalent command line example: singularity oci create [create options...] <container_ID> Parameters ========== bundle: the full path to the bundle folder container_id: an optional container_id. If not provided, use same container_id used to generate OciImage instance empty_process: run container without executing container process (for example, for a pod container waiting for signals). This is a specific use case for tools like Kubernetes log_path: the path to store the log. pid_file: specify the pid file path to use sync_socket: the path to the unix socket for state synchronization. command: the command (run or create) to use (default is run) log_format: defaults to kubernetes. Can also be "basic" or "json"
def visitFunctionCall(self, ctx):
    """
    expression : fnname LPAREN parameters? RPAREN
    """
    func_name = ctx.fnname().getText()
    params_ctx = ctx.parameters()
    # The parameters clause is optional; absent means a zero-arg call.
    arguments = self.visit(params_ctx) if params_ctx is not None else []
    return self._functions.invoke_function(
        self._eval_context, func_name, arguments)
expression : fnname LPAREN parameters? RPAREN
def LazyField(lookup_name, scope):
    """Super non-standard stuff here. Dynamically changing the base
    class using the scope and the lazy name when the class is
    instantiated. This works as long as the original base class is
    not directly inheriting from object (which we're not, since
    our original base class is fields.Field).

    :param lookup_name: name of the real field type, resolved from
        ``scope`` at instantiation time.
    :param scope: scope object providing ``get_id`` for the lookup.
    :returns: a new placeholder class whose base is swapped to the
        resolved type on first instantiation.
    """
    def __init__(self, stream=None):
        # Resolve the real base class lazily, at instantiation time,
        # then rewrite this class's bases so isinstance/attribute
        # lookup behave as the resolved type.
        base_cls = self._pfp__scope.get_id(self._pfp__lazy_name)
        self.__class__.__bases__ = (base_cls,)
        base_cls.__init__(self, stream)

    new_class = type(lookup_name + "_lazy", (fields.Field,), {
        "__init__": __init__,
        "_pfp__scope": scope,
        "_pfp__lazy_name": lookup_name
    })
    return new_class
Super non-standard stuff here. Dynamically changing the base class using the scope and the lazy name when the class is instantiated. This works as long as the original base class is not directly inheriting from object (which we're not, since our original base class is fields.Field).
def static_serve(request, path, client):
    """
    Given a request for a media asset, this view does the necessary wrangling
    to get the correct thing delivered to the user. This can also emulate the
    combo behavior seen when SERVE_REMOTE == False and EMULATE_COMBO == True.
    """
    if msettings['SERVE_REMOTE']:
        # We're serving from S3, redirect there.
        # NOTE(review): the redirect target keeps the literal '%(path)s'
        # placeholder -- presumably substituted downstream; confirm.
        url = client.remote_media_url().strip('/') + '/%(path)s'
        return redirect(url, permanent=True)

    if not msettings['SERVE_REMOTE'] and msettings['EMULATE_COMBO']:
        # Combo emulation is on and we're serving media locally. Try to see if
        # the given path matches a combo file defined in the JOINED dict in
        # the MEDIASYNC settings dict.
        combo_match = _find_combo_match(path)
        if combo_match:
            # We found a combo file match. Combine it and serve the result.
            return combo_serve(request, combo_match, client)

    # No combo file, but we're serving locally. Use the standard (inefficient)
    # Django static serve view.
    resp = serve(request, path, document_root=client.media_root,
                 show_indexes=True)
    try:
        resp.content = client.process(resp.content, resp['Content-Type'], path)
    except KeyError:
        # HTTPNotModifiedResponse lacks the "Content-Type" key.
        pass
    return resp
Given a request for a media asset, this view does the necessary wrangling to get the correct thing delivered to the user. This can also emulate the combo behavior seen when SERVE_REMOTE == False and EMULATE_COMBO == True.
def generate_pagerank_graph(num_vertices=250, **kwargs):
    """Creates a random graph where the vertex types are selected using
    their pagerank.

    Calls :func:`.minimal_random_graph` and then :func:`.set_types_rank`
    where the ``rank`` keyword argument is given by
    :func:`networkx.pagerank`.

    Parameters
    ----------
    num_vertices : int (optional, the default is 250)
        The number of vertices in the graph.
    **kwargs :
        Any parameters to send to :func:`.minimal_random_graph` or
        :func:`.set_types_rank`.

    Returns
    -------
    :class:`.QueueNetworkDiGraph`
        A graph with a ``pos`` vertex property and the ``edge_type``
        edge property.
    """
    graph = minimal_random_graph(num_vertices, **kwargs)
    # Collect pagerank scores into a dense vector indexed by vertex.
    ranks = np.zeros(num_vertices)
    for vertex, score in nx.pagerank(graph).items():
        ranks[vertex] = score
    return set_types_rank(graph, rank=ranks, **kwargs)
Creates a random graph where the vertex types are selected using their pagerank. Calls :func:`.minimal_random_graph` and then :func:`.set_types_rank` where the ``rank`` keyword argument is given by :func:`networkx.pagerank`. Parameters ---------- num_vertices : int (optional, the default is 250) The number of vertices in the graph. **kwargs : Any parameters to send to :func:`.minimal_random_graph` or :func:`.set_types_rank`. Returns ------- :class:`.QueueNetworkDiGraph` A graph with a ``pos`` vertex property and the ``edge_type`` edge property. Notes ----- This function sets the edge types of a graph to be either 1, 2, or 3. It sets the vertices to type 2 by selecting the top ``pType2 * g.number_of_nodes()`` vertices given by the :func:`~networkx.pagerank` of the graph. A loop is added to all vertices identified this way (if one does not exist already). It then randomly sets vertices close to the type 2 vertices as type 3, and adds loops to these vertices as well. These loops then have edge types that correspond to the vertices type. The rest of the edges are set to type 1.
def write(self, process_tile, data):
    """
    Write data from process tiles into PNG file(s).

    Parameters
    ----------
    process_tile : ``BufferedTile``
        must be member of process ``TilePyramid``
    data : array-like
        raster data; converted to a masked array via ``_prepare_array``.
    """
    data = self._prepare_array(data)

    if data.mask.all():
        # Fully masked data would produce empty tiles; skip writing.
        logger.debug("data empty, nothing to write")
    else:
        # in case of S3 output, create an boto3 resource
        bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None

        # Convert from process_tile to output_tiles and write
        for tile in self.pyramid.intersecting(process_tile):
            out_path = self.get_path(tile)
            self.prepare_path(tile)
            out_tile = BufferedTile(tile, self.pixelbuffer)
            write_raster_window(
                in_tile=process_tile,
                in_data=data,
                out_profile=self.profile(out_tile),
                out_tile=out_tile,
                out_path=out_path,
                bucket_resource=bucket_resource
            )
Write data from process tiles into PNG file(s). Parameters ---------- process_tile : ``BufferedTile`` must be member of process ``TilePyramid``
def readWiggleLine(self, line):
    """
    Read a wiggle line. If it is a data line, add values to the protocol
    object.
    """
    # Skip blanks, comments and browser/track declaration lines.
    if(line.isspace() or line.startswith("#")
            or line.startswith("browser") or line.startswith("track")):
        return
    elif line.startswith("variableStep"):
        self._mode = self._VARIABLE_STEP
        self.parseStep(line)
        return
    elif line.startswith("fixedStep"):
        self._mode = self._FIXED_STEP
        self.parseStep(line)
        return
    elif self._mode is None:
        # Data before any step declaration is malformed input.
        raise ValueError("Unexpected input line: %s" % line.strip())

    if self._queryReference != self._reference:
        return

    # read data lines
    fields = line.split()
    if self._mode == self._VARIABLE_STEP:
        start = int(fields[0])-1  # to 0-based
        val = float(fields[1])
    else:
        start = self._start
        self._start += self._step
        val = float(fields[0])

    # NOTE(review): the start bound uses strict '>', which excludes a value
    # exactly at queryStart -- confirm whether '>=' was intended.
    if start < self._queryEnd and start > self._queryStart:
        if self._position is None:
            self._position = start
            self._data.start = start

        # fill gap
        while self._position < start:
            self._data.values.append(float('NaN'))
            self._position += 1
        # A value covers `span` bases: append it span times.
        for _ in xrange(self._span):
            self._data.values.append(val)
        self._position += self._span
Read a wiggle line. If it is a data line, add values to the protocol object.
def field_xpath(field, attribute):
    """
    Field helper functions to locate select, textarea, and the other
    types of input fields (text, checkbox, radio)

    :param field: One of the values 'select', 'textarea', 'option', or
                  'button-element' to match a corresponding HTML element
                  (and to match a <button> in the case of
                  'button-element'). Otherwise a type to match an <input>
                  element with a type=<field> attribute.
    :param attribute: An attribute to be matched against, or 'value' to
                      match against the content within element being
                      matched.
    """
    if field in ('select', 'textarea'):
        template = './/{field}[@{attr}=%s]'
    elif field == 'button-role':
        # ARIA buttons: any element carrying role="button".
        template = ('.//*[@role="button"][contains(., %s)]'
                    if attribute == 'value'
                    else './/*[@role="button"][@{attr}=%s]')
    elif field == 'button-element':
        field = 'button'
        template = ('.//{field}[contains(., %s)]'
                    if attribute == 'value'
                    else './/{field}[@{attr}=%s]')
    elif field == 'option':
        template = './/{field}[@{attr}=%s]'
    else:
        # Fallback: match an <input> with a matching type attribute.
        template = './/input[@{attr}=%s][@type="{field}"]'
    return template.format(field=field, attr=attribute)
Field helper functions to locate select, textarea, and the other types of input fields (text, checkbox, radio) :param field: One of the values 'select', 'textarea', 'option', or 'button-element' to match a corresponding HTML element (and to match a <button> in the case of 'button-element'). Otherwise a type to match an <input> element with a type=<field> attribute. :param attribute: An attribute to be matched against, or 'value' to match against the content within element being matched.
def list_mapping(html_cleaned):
    """Map the preprocessed HTML document to a list and an index/length
    mapping, and extract the fake title.

    :param html_cleaned: preprocessed page source, as a single string.
    :return: tuple ``(unit_raw, init_dict, fake_title)`` where
        ``unit_raw`` is the list of text lines,
        ``init_dict`` is a list of ``(index, line_length)`` pairs sorted
        by line length in descending order (``sorted`` turns the dict
        into a list of tuples), and
        ``fake_title`` is the fake title, i.e. the text taken from the
        page's <title> element ('' if none was found).
    """
    unit_raw = html_cleaned.split('\n')

    # The first line whose CDM(...).PTN is non-zero becomes the fake title.
    # (was `c.PTN is not 0`: identity comparison against an int literal is
    # unreliable and a SyntaxWarning in Python 3.8+ -- use a value
    # comparison instead; assumes PTN is an int -- TODO confirm)
    for unit in unit_raw:
        if CDM(unit).PTN != 0:
            fake_title = unit
            break

    # index -> line length, sorted longest-first.
    init_dict = sorted(
        {index: len(unit) for index, unit in enumerate(unit_raw)}.items(),
        key=lambda item: item[1],
        reverse=True,
    )

    # If no line matched above, fake_title is unbound and formatting it
    # raises UnboundLocalError -- that is the deliberate fallback trigger.
    try:
        log('debug', '映射成功,提取的虚假标题为:【{}】'.format(fake_title))
    except UnboundLocalError:
        fake_title = ''
        log('err', '虚假标题提取失败')
    return unit_raw, init_dict, fake_title
将预处理后的网页文档映射成列表和字典,并提取虚假标题 Keyword arguments: html_cleaned -- 预处理后的网页源代码,字符串类型 Return: unit_raw -- 网页文本行 init_dict -- 字典的key是索引,value是网页文本行,并按照网页文本行长度降序排序 fake_title -- 虚假标题,即网页源代码<title>中的文本行
def unmake(self):
    """Remove the instance via message-passing.

    Equivalent to ``delete``, except the removal is requested through
    message-passing instead of destroying the instance directly.

    :raises CLIPSError: if the environment reports a failure.
    """
    outcome = lib.EnvUnmakeInstance(self._env, self._ist)
    if outcome != 1:
        raise CLIPSError(self._env)
This method is equivalent to delete except that it uses message-passing instead of directly deleting the instance.
def destroy(name):
    '''Force-remove an LXC container (stopping it first if it is running).

    :param name: name of the container to destroy.
    :raises ContainerNotExists: if no container with that name exists.
    '''
    if not exists(name):
        raise ContainerNotExists("The container (%s) does not exist!" % name)
    subprocess.check_call(['lxc-destroy', '-f', '-n', name])
removes a container [stops a container if it's running and] raises ContainerNotExists exception if the specified name is not created
def conditional_distribution(self, values, inplace=True):
    """
    Condition the joint distribution on the given variable states.

    Reduces the distribution over ``values`` and renormalizes so that the
    remaining probabilities sum to 1.

    Parameters
    ----------
    values: list or array_like
        A list of tuples of the form (variable_name, variable_state) on
        which to condition the Joint Probability Distribution.
    inplace: Boolean (default True)
        If False, returns a new instance of JointProbabilityDistribution
        instead of modifying this one.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8)
    >>> prob.conditional_distribution([('x1', 1)])
    >>> print(prob)
    x2    x3      P(x2,x3)
    ----  ----  ----------
    x2_0  x3_0      0.2500
    x2_0  x3_1      0.2500
    x2_1  x3_0      0.2500
    x2_1  x3_1      0.2500
    """
    target = self.copy() if not inplace else self
    target.reduce(values)
    target.normalize()
    if not inplace:
        return target
Returns Conditional Probability Distribution after setting values to 1. Parameters ---------- values: list or array_like A list of tuples of the form (variable_name, variable_state). The values on which to condition the Joint Probability Distribution. inplace: Boolean (default True) If False returns a new instance of JointProbabilityDistribution Examples -------- >>> import numpy as np >>> from pgmpy.factors.discrete import JointProbabilityDistribution >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8)/8) >>> prob.conditional_distribution([('x1', 1)]) >>> print(prob) x2 x3 P(x2,x3) ---- ---- ---------- x2_0 x3_0 0.2500 x2_0 x3_1 0.2500 x2_1 x3_0 0.2500 x2_1 x3_1 0.2500