def _query_mysql(self):
    """
    Queries mysql and returns a cursor to the results.
    """
    mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
    conn = mysql.get_conn()
    cursor = conn.cursor()
    cursor.execute(self.sql)
    return cursor
import os
from argparse import ArgumentTypeError


def path(string):
    """
    Define the 'path' data type that can be used by apps.
    """
    if not os.path.exists(string):
        msg = "Path %s not found!" % string
        raise ArgumentTypeError(msg)
    return string
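# Wiring sketch for the 'path' type above; the parser and argument name are
# hypothetical, not from the original app. argparse reports an error and
# exits when the given path does not exist.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('config', type=path, help='an existing file or directory')
args = parser.parse_args(['/etc/hosts'])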
def add_arc(self, src, dst, char):
    """Adds a new Arc

    Args:
        src (int): The source state identifier
        dst (int): The destination state identifier
        char (str): The character for the transition
    Returns:
        None
    """
    if src not in self.automaton.states():
        self.add_state()
    arc = fst.Arc(self.isyms[char], self.osyms[char],
                  fst.Weight.One(self.automaton.weight_type()), dst)
    self.automaton.add_arc(src, arc)
def itertrain(self, train, valid=None, **kwargs):
    '''Train a model using a training and validation set.

    This method yields a series of monitor values to the caller. After every
    iteration, a pair of monitor dictionaries is generated: one evaluated on
    the training dataset, and another evaluated on the validation dataset.
    The validation monitors might not be updated during every training
    iteration; in this case, the most recent validation monitors will be
    yielded along with the training monitors.

    Parameters
    ----------
    train : :class:`Dataset <theanets.dataset.Dataset>`
        A set of training data for computing updates to model parameters.
    valid : :class:`Dataset <theanets.dataset.Dataset>`
        A set of validation data for computing monitor values and
        determining when the loss has stopped improving.

    Yields
    ------
    training : dict
        A dictionary mapping monitor names to values, evaluated on the
        training dataset.
    validation : dict
        A dictionary containing monitor values evaluated on the validation
        dataset.
    '''
    from . import feedforward

    original_layer_names = set(l.name for l in self.network.layers[:-1])

    # construct a "shadow" of the input network, using the original
    # network's encoding layers, with tied weights in an autoencoder
    # configuration.
    layers_ = list(l.to_spec() for l in self.network.layers[:-1])
    for i, l in enumerate(layers_[::-1][:-2]):
        layers_.append(dict(
            form='tied', partner=l['name'], activation=l['activation']))
    layers_.append(dict(
        form='tied', partner=layers_[1]['name'], activation='linear'))
    util.log('creating shadow network')
    ae = feedforward.Autoencoder(layers=layers_)

    # train the autoencoder using the supervised layerwise pretrainer.
    pre = SupervisedPretrainer(self.algo, ae)
    for monitors in pre.itertrain(train, valid, **kwargs):
        yield monitors

    # copy trained parameter values back to our original network.
    for param in ae.params:
        l, p = param.name.split('.')
        if l in original_layer_names:
            util.log('copying pretrained parameter {}', param.name)
            self.network.find(l, p).set_value(param.get_value())

    util.log('completed unsupervised pretraining')
def _resolve_dut_count(self):
    """
    Calculates total amount of resources required and their types.

    :return: Nothing, modifies _dut_count, _hardware_count and
        _process_count
    :raises: ValueError if total count does not match counts of types
        separately.
    """
    self._dut_count = len(self._dut_requirements)
    self._resolve_process_count()
    self._resolve_hardware_count()
    if self._dut_count != self._hardware_count + self._process_count:
        raise ValueError("Missing or invalid type fields in dut configuration!")
def assign_reads_to_otus(original_fasta,
                         filtered_fasta,
                         output_filepath=None,
                         log_name="assign_reads_to_otus.log",
                         perc_id_blast=0.97,
                         global_alignment=True,
                         HALT_EXEC=False,
                         save_intermediate_files=False,
                         remove_usearch_logs=False,
                         working_dir=None):
    """ Uses original fasta file, blasts to assign reads to filtered fasta

    original_fasta = filepath to original query fasta
    filtered_fasta = filepath to enumerated, filtered fasta
    output_filepath = output path to clusters (uc) file
    log_name = string specifying output log name
    perc_id_blast = percent ID for blasting original seqs against filtered set
    usersort = Enable if input fasta not sorted by length purposefully, lest
        usearch will raise an error. In post chimera checked sequences, the
        seqs are sorted by abundance, so this should be set to True.
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    # Not sure if I feel comfortable using blast as a way to recapitulate
    # original read ids....
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='assign_reads_to_otus',
                                     suffix='.uc')

    log_filepath = join(working_dir, log_name)

    params = {'--id': perc_id_blast,
              '--global': global_alignment}

    app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)

    data = {'--query': original_fasta,
            '--db': filtered_fasta,
            '--uc': output_filepath
            }

    if not remove_usearch_logs:
        data['--log'] = log_filepath

    app_result = app(data)

    return app_result, output_filepath
def reverse(cls, value, prop, visitor):
    """Like :py:meth:`normalize.visitor.VisitorPattern.apply` but called
    for ``cast`` operations. The default implementation passes through
    but squashes exceptions, just like apply.
    """
    return (
        None if isinstance(value, (AttributeError, KeyError)) else value
    )
def create(cls, currency, all_co_owner, description=None, daily_limit=None,
           overdraft_limit=None, alias=None, avatar_uuid=None, status=None,
           sub_status=None, reason=None, reason_description=None,
           notification_filters=None, setting=None, custom_headers=None):
    """
    :type user_id: int
    :param currency: The currency of the MonetaryAccountJoint as an ISO 4217
        formatted currency code.
    :type currency: str
    :param all_co_owner: The users the account will be joint with.
    :type all_co_owner: list[object_.CoOwner]
    :param description: The description of the MonetaryAccountJoint.
        Defaults to 'bunq account'.
    :type description: str
    :param daily_limit: The daily spending limit Amount of the
        MonetaryAccountJoint. Defaults to 1000 EUR. Currency must match the
        MonetaryAccountJoint's currency. Limited to 10000 EUR.
    :type daily_limit: object_.Amount
    :param overdraft_limit: The maximum Amount the MonetaryAccountJoint can
        be 'in the red'. Must be 0 EUR or omitted.
    :type overdraft_limit: object_.Amount
    :param alias: The Aliases to add to MonetaryAccountJoint. Must all be
        confirmed first. Can mostly be ignored.
    :type alias: list[object_.Pointer]
    :param avatar_uuid: The UUID of the Avatar of the MonetaryAccountJoint.
    :type avatar_uuid: str
    :param status: The status of the MonetaryAccountJoint. Ignored in POST
        requests (always set to ACTIVE) can be CANCELLED or PENDING_REOPEN in
        PUT requests to cancel (close) or reopen the MonetaryAccountJoint.
        When updating the status and/or sub_status no other fields can be
        updated in the same request (and vice versa).
    :type status: str
    :param sub_status: The sub-status of the MonetaryAccountJoint providing
        extra information regarding the status. Should be ignored for POST
        requests. In case of PUT requests with status CANCELLED it can only
        be REDEMPTION_VOLUNTARY, while with status PENDING_REOPEN it can only
        be NONE. When updating the status and/or sub_status no other fields
        can be updated in the same request (and vice versa).
    :type sub_status: str
    :param reason: The reason for voluntarily cancelling (closing) the
        MonetaryAccountJoint, can only be OTHER. Should only be specified if
        updating the status to CANCELLED.
    :type reason: str
    :param reason_description: The optional free-form reason for voluntarily
        cancelling (closing) the MonetaryAccountJoint. Can be any user
        provided message. Should only be specified if updating the status to
        CANCELLED.
    :type reason_description: str
    :param notification_filters: The types of notifications that will result
        in a push notification or URL callback for this MonetaryAccountJoint.
    :type notification_filters: list[object_.NotificationFilter]
    :param setting: The settings of the MonetaryAccountJoint.
    :type setting: object_.MonetaryAccountSetting
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_CURRENCY: currency,
        cls.FIELD_DESCRIPTION: description,
        cls.FIELD_DAILY_LIMIT: daily_limit,
        cls.FIELD_OVERDRAFT_LIMIT: overdraft_limit,
        cls.FIELD_ALIAS: alias,
        cls.FIELD_AVATAR_UUID: avatar_uuid,
        cls.FIELD_STATUS: status,
        cls.FIELD_SUB_STATUS: sub_status,
        cls.FIELD_REASON: reason,
        cls.FIELD_REASON_DESCRIPTION: reason_description,
        cls.FIELD_ALL_CO_OWNER: all_co_owner,
        cls.FIELD_NOTIFICATION_FILTERS: notification_filters,
        cls.FIELD_SETTING: setting
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id())
    response_raw = api_client.post(endpoint_url, request_bytes,
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
def set_session_cache_mode(self, mode):
    """
    Set the behavior of the session cache used by all connections using
    this Context. The previously set mode is returned. See
    :const:`SESS_CACHE_*` for details about particular modes.

    :param mode: One or more of the SESS_CACHE_* flags (combine using
        bitwise or)
    :returns: The previously set caching mode.

    .. versionadded:: 0.14
    """
    if not isinstance(mode, integer_types):
        raise TypeError("mode must be an integer")

    return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)
def create_manually(cls,
                    validation_function_name,  # type: str
                    var_name,                  # type: str
                    var_value,
                    validation_outcome=None,   # type: Any
                    help_msg=None,             # type: str
                    append_details=True,       # type: bool
                    **kw_context_args):
    """
    Creates an instance without using a Validator.

    This method is not the primary way that errors are created - they
    should rather be created by the validation entry points. However it
    can be handy in rare edge cases.

    :param validation_function_name:
    :param var_name:
    :param var_value:
    :param validation_outcome:
    :param help_msg:
    :param append_details:
    :param kw_context_args:
    :return:
    """
    # create a dummy validator
    def val_fun(x):
        pass
    val_fun.__name__ = validation_function_name
    validator = Validator(val_fun, error_type=cls, help_msg=help_msg,
                          **kw_context_args)

    # create the exception
    # e = cls(validator, var_value, var_name, validation_outcome=validation_outcome, help_msg=help_msg,
    #         append_details=append_details, **kw_context_args)
    e = validator._create_validation_error(var_name, var_value,
                                           validation_outcome,
                                           error_type=cls, help_msg=help_msg,
                                           **kw_context_args)
    return e
def start(self):
    """
    Start a concurrent operation.

    If we are below the limit, we increment the concurrency count and
    fire the deferred we return. If not, we add the deferred to the
    waiters list and return it unfired.
    """
    # While the implementation matches the description in the docstring
    # conceptually, it always adds a new waiter and then calls
    # _check_concurrent() to handle the various cases.
    if self._limit < 0:
        # Special case for no limit, never block.
        return succeed(None)
    elif self._limit == 0:
        # Special case for limit of zero, always block forever.
        return Deferred()
    d = self._make_waiter()
    self._check_concurrent()
    return d
def bytes2uuid(b):
    """
    Return standard human-friendly UUID.
    """
    if b.strip(chr(0)) == '':
        return None
    s = b.encode('hex')
    return "%s-%s-%s-%s-%s" % (s[0:8], s[8:12], s[12:16], s[16:20], s[20:])
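# Example calls (Python 2, matching the str.encode('hex') usage above):
# 16 raw bytes map to canonical UUID text, an all-zero buffer maps to None.
raw = '\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0'
print bytes2uuid(raw)          # 12345678-9abc-def0-1234-56789abcdef0
print bytes2uuid('\x00' * 16)  # None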
def _get_input(self, length):
    """! @brief Extract requested amount of data from the read buffer."""
    self._buffer_lock.acquire()
    try:
        if length == -1:
            actualLength = len(self._buffer)
        else:
            actualLength = min(length, len(self._buffer))
        if actualLength:
            data = self._buffer[:actualLength]
            self._buffer = self._buffer[actualLength:]
        else:
            data = bytearray()
        return data
    finally:
        self._buffer_lock.release()
def get_asset_search_session(self, proxy):
    """Gets an asset search session.

    arg     proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetSearchSession) - an AssetSearchSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_asset_search() is false
    compliance: optional - This method must be implemented if
                supports_asset_search() is true.
    """
    if not self.supports_asset_search():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.AssetSearchSession(proxy, runtime=self._runtime)
    except AttributeError:
        raise  # OperationFailed()
    return session
def zip_response(request, filename, files):
    """Return a Response object that is a zipfile with name filename.

    :param request: The request object.
    :param filename: The filename the browser should save the file as.
    :param files: A list of mappings between filenames (path/.../file) to
        file objects.
    """
    tmp_file = NamedTemporaryFile()
    try:
        with ZipFile(tmp_file, 'w') as zip_file:
            for zip_path, actual_path in files:
                zip_file.write(actual_path, zip_path)
        tmp_file.flush()  # Just in case
        response = FileResponse(tmp_file.name, request=request,
                                content_type=str('application/zip'))
        response.headers['Content-disposition'] = ('attachment; filename="{0}"'
                                                   .format(filename))
        return response
    finally:
        tmp_file.close()
def max_layout_dimensions(dimensions):
    """
    Take the maximum of a list of :class:`.LayoutDimension` instances.
    """
    min_ = max([d.min for d in dimensions if d.min is not None])
    max_ = max([d.max for d in dimensions if d.max is not None])
    preferred = max([d.preferred for d in dimensions])

    return LayoutDimension(min=min_, max=max_, preferred=preferred)
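# A small check, assuming the LayoutDimension(min=..., max=..., preferred=...)
# constructor that the function body itself relies on (prompt_toolkit style):
dims = [LayoutDimension(min=1, max=10, preferred=5),
        LayoutDimension(min=3, max=8, preferred=6)]
merged = max_layout_dimensions(dims)  # min=3, max=10, preferred=6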
def update_model_params(self, **params):
    r"""Update the given model parameters if they are set to specific values."""
    for key, value in params.items():
        if not hasattr(self, key):
            setattr(self, key, value)  # set parameter for the first time.
        elif getattr(self, key) is None:
            setattr(self, key, value)  # update because this parameter is still None.
        elif value is not None:
            setattr(self, key, value)  # only overwrite with a specific (non-None) value.
r"""Update given model parameter if they are set to specific values
def is_tagged(required_tags, has_tags):
    """Checks if tags match"""
    if not required_tags and not has_tags:
        return True
    elif not required_tags:
        return False

    found_tags = []
    for tag in required_tags:
        if tag in has_tags:
            found_tags.append(tag)

    return len(found_tags) == len(required_tags)
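# Plain-call sketch of the matching rules implemented above:
is_tagged(['web', 'prod'], ['web', 'prod', 'eu'])  # True: all required present
is_tagged(['web', 'prod'], ['web'])                # False: 'prod' missing
is_tagged([], [])                                  # True: nothing required, nothing present
is_tagged([], ['web'])                             # False: untagged filter, tagged item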
import base64
from io import BytesIO


def from_json(value, **kwargs):
    """Convert a PNG Image from base64-encoded JSON"""
    if not value.startswith(PNG_PREAMBLE):
        raise ValueError('Not a valid base64-encoded PNG image')
    infile = BytesIO()
    rep = base64.b64decode(value[len(PNG_PREAMBLE):].encode('utf-8'))
    infile.write(rep)
    infile.seek(0)
    return infile
def diamond_functions(xx, yy, y_x0, x_y0):
    """
    Method that creates two upper and lower functions based on points xx and
    yy as well as intercepts defined by y_x0 and x_y0. The resulting
    functions form kind of a distorted diamond-like structure aligned from
    point xx to point yy.

    Schematically :

    xx is symbolized by x, yy is symbolized by y, y_x0 is equal to the
    distance from x to a, x_y0 is equal to the distance from x to b, the
    lines a-p and b-q are parallel to the line x-y such that points p and q
    are obtained automatically. In case of an increasing diamond the lower
    function is x-b-q and the upper function is a-p-y while in case of a
    decreasing diamond, the lower function is a-p-y and the upper function
    is x-b-q.

        Increasing diamond   |   Decreasing diamond

              p--y                x----b
             / /|                 |\\ \\
            / / |                 | \\ q
           / /  |                 a  \\ |
          a /   |                  \\  \\ |
          | /   q                   \\  \\ |
          |/   /                     \\  \\|
          x----b                      p--y

    Args:
        xx: First point
        yy: Second point

    Returns:
        A dictionary with the lower and upper diamond functions.
    """
    npxx = np.array(xx)
    npyy = np.array(yy)
    if np.any(npxx == npyy):
        raise RuntimeError('Invalid points for diamond_functions')
    if np.all(npxx < npyy) or np.all(npxx > npyy):
        if npxx[0] < npyy[0]:
            p1 = npxx
            p2 = npyy
        else:
            p1 = npyy
            p2 = npxx
    else:
        if npxx[0] < npyy[0]:
            p1 = npxx
            p2 = npyy
        else:
            p1 = npyy
            p2 = npxx
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    if slope > 0.0:
        x_bpoint = p1[0] + x_y0
        myy = p1[1]
        bq_intercept = myy - slope * x_bpoint
        myx = p1[0]
        myy = p1[1] + y_x0
        ap_intercept = myy - slope * myx
        x_ppoint = (p2[1] - ap_intercept) / slope

        def lower(x):
            return np.where(x <= x_bpoint,
                            p1[1] * np.ones_like(x),
                            slope * x + bq_intercept)

        def upper(x):
            return np.where(x >= x_ppoint,
                            p2[1] * np.ones_like(x),
                            slope * x + ap_intercept)
    else:
        x_bpoint = p1[0] + x_y0
        myy = p1[1]
        bq_intercept = myy - slope * x_bpoint
        myx = p1[0]
        myy = p1[1] - y_x0
        ap_intercept = myy - slope * myx
        x_ppoint = (p2[1] - ap_intercept) / slope

        def lower(x):
            return np.where(x >= x_ppoint,
                            p2[1] * np.ones_like(x),
                            slope * x + ap_intercept)

        def upper(x):
            return np.where(x <= x_bpoint,
                            p1[1] * np.ones_like(x),
                            slope * x + bq_intercept)
    return {'lower': lower, 'upper': upper}
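# A usage sketch with two increasing points; the returned 'lower' and 'upper'
# callables are vectorized over numpy arrays. Point values are illustrative.
import numpy as np

funcs = diamond_functions(xx=(0.0, 0.0), yy=(4.0, 2.0), y_x0=0.5, x_y0=1.0)
x = np.linspace(0.0, 4.0, 9)
lo, up = funcs['lower'](x), funcs['upper'](x)  # lo <= up over the interval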
def idPlayerResults(cfg, rawResult):
    """interpret standard rawResult for all players with known IDs"""
    result = {}
    knownPlayers = []
    dictResult = {plyrRes.player_id: plyrRes.result for plyrRes in rawResult}
    for p in cfg.players:
        if p.playerID and p.playerID in dictResult:  # identified player w/ result
            knownPlayers.append(p)
            result[p.name] = dictResult[p.playerID]
    #if len(knownPlayers) == len(dictResult) - 1:  # identified all but one player
    #    for p in cfg.players:  # search for the not identified player
    #        if p in knownPlayers:  continue  # already found
    #        result.append( [p.name, p.playerID, dictResult[p.playerID]] )
    #        break  # found missing player; stop searching
    #for r in result:
    #    print("result:>", r)
    return result
from copy import deepcopy


def PositionedPhoneme(phoneme,
                      word_initial=False, word_final=False,
                      syllable_initial=False, syllable_final=False,
                      env_start=False, env_end=False):
    '''
    A decorator for phonemes, used in applying rules over words.
    Returns a copy of the input phoneme, with additional attributes,
    specifying whether the phoneme occurs at a word or syllable boundary,
    or its position in an environment.
    '''
    pos_phoneme = deepcopy(phoneme)
    pos_phoneme.word_initial = word_initial
    pos_phoneme.word_final = word_final
    pos_phoneme.syllable_initial = syllable_initial
    pos_phoneme.syllable_final = syllable_final
    pos_phoneme.env_start = env_start
    pos_phoneme.env_end = env_end

    return pos_phoneme
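# Hypothetical usage; `stop` stands in for any deepcopy-able phoneme object
# from the original package and is created here only for illustration.
stop = type('Phoneme', (), {})()
p = PositionedPhoneme(stop, word_initial=True)
assert p.word_initial and not p.word_final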
def start(self) -> None:
    """
    Start the internal control loop. Potentially blocking, depending
    on the value of `_run_control_loop` set by the initializer.
    """
    self._setup()
    if self._run_control_loop:
        asyncio.set_event_loop(asyncio.new_event_loop())
        self._heartbeat_reciever.start()
        self._logger.info(' Start Loop')
        return self.loop.start()
    else:
        self._logger.debug(' run_control_loop == False')
def region(self):
    """
    Get the region :class:`language_tags.Subtag.Subtag` of the tag.

    :return: region :class:`language_tags.Subtag.Subtag` that is part of
        the tag. The return can be None.
    """
    region_item = [subtag for subtag in self.subtags if subtag.type == 'region']
    return region_item[0] if len(region_item) > 0 else None
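# Hypothetical usage, assuming this lives on a Tag class as in the
# language_tags package, where it is exposed as a property:
tag = Tag('de-AT')  # Tag is assumed to be importable from language_tags.Tag
print(tag.region)   # Subtag for 'AT', or None when the tag has no region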
def bluemix(cls, vcap_services, instance_name=None, service_name=None,
            **kwargs):
    """
    Create a Cloudant session using a VCAP_SERVICES environment
    variable.

    :param vcap_services: VCAP_SERVICES environment variable
    :type vcap_services: dict or str
    :param str instance_name: Optional Bluemix instance name. Only required
        if multiple Cloudant instances are available.
    :param str service_name: Optional Bluemix service name.

    Example usage:

    .. code-block:: python

        import os
        from cloudant.client import Cloudant

        client = Cloudant.bluemix(os.getenv('VCAP_SERVICES'),
                                  'Cloudant NoSQL DB')

        print client.all_dbs()
    """
    service_name = service_name or 'cloudantNoSQLDB'  # default service
    try:
        service = CloudFoundryService(vcap_services,
                                      instance_name=instance_name,
                                      service_name=service_name)
    except CloudantException:
        raise CloudantClientException(103)

    if hasattr(service, 'iam_api_key'):
        return Cloudant.iam(service.username,
                            service.iam_api_key,
                            url=service.url,
                            **kwargs)
    return Cloudant(service.username,
                    service.password,
                    url=service.url,
                    **kwargs)
def main(argv=None):
    """Generates documentation for signature generation pipeline"""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        'pipeline',
        help='Python dotted path to rules pipeline to document'
    )
    parser.add_argument('output', help='output file')

    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)

    print('Generating documentation for %s in %s...'
          % (args.pipeline, args.output))

    rules = import_rules(args.pipeline)

    with open(args.output, 'w') as fp:
        fp.write('.. THIS IS AUTOGENERATED USING:\n')
        fp.write(' \n')
        fp.write('   %s\n' % (' '.join(sys.argv)))
        fp.write(' \n')
        fp.write('Signature generation rules pipeline\n')
        fp.write('===================================\n')
        fp.write('\n')
        fp.write('\n')
        fp.write(
            'This is the signature generation pipeline defined at ``%s``:\n'
            % args.pipeline
        )
        fp.write('\n')
        for i, rule in enumerate(rules):
            li = '%s. ' % (i + 1)
            fp.write('%s%s\n' % (
                li, indent(get_doc(rule), ' ' * len(li))
            ))
            fp.write('\n')
def mobile(self):
    """
    Access the mobile

    :returns: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.mobile.MobileList
    """
    if self._mobile is None:
        self._mobile = MobileList(self._version,
                                  account_sid=self._solution['account_sid'], )
    return self._mobile
def get_context_data(self, **kwargs):
    """
    Insert the form into the context dict.
    """
    for key in self.get_form_class_keys():
        kwargs['{}_form'.format(key)] = self.get_form(key)
    return super(FormMixin, self).get_context_data(**kwargs)
def _get_dx_tree(xy, degree):
    """
    0

    1*(0, 0)    0

    2*(1, 0)    1*(0, 1)    0

    3*(2, 0)    2*(1, 1)    1*(0, 2)    0

       ...         ...        ...      ...
    """
    x, y = xy

    # build smaller tree
    one = numpy.array([numpy.ones(x.shape, dtype=int)])
    tree = [one]
    for d in range(1, degree):
        tree.append(
            numpy.concatenate(
                [
                    # Integer division `//` would be nice here, but
                    # <https://github.com/sympy/sympy/issues/14542>.
                    [tree[-1][0] / d * (d + 1) * x],
                    tree[-1] * y,
                ]
            )
        )

    # append zeros
    zero = numpy.array([numpy.zeros(x.shape, dtype=int)])
    tree = [zero] + [numpy.concatenate([t, zero]) for t in tree]

    return tree
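# A quick check of the triangle layout for degree 2 with length-1 inputs;
# rows come back as [0], [1, 0], [2*x, 1*y, 0], matching the docstring.
import numpy

x = numpy.array([2.0])
y = numpy.array([3.0])
tree = _get_dx_tree((x, y), 2)
# tree[2] -> array([[4.], [3.], [0.]])   i.e. 2*x, 1*y, 0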
def update_annotations(self):
    """Update annotations made by the user, including bookmarks and events.

    Depending on the settings, it might add the bookmarks to overview and
    traces.
    """
    start_time = self.parent.overview.start_time

    if self.parent.notes.annot is None:
        all_annot = []
    else:
        bookmarks = self.parent.notes.annot.get_bookmarks()
        events = self.get_selected_events()

        all_annot = bookmarks + events
        all_annot = sorted(all_annot, key=lambda x: x['start'])

    self.idx_annot_list.clearContents()
    self.idx_annot_list.setRowCount(len(all_annot))

    for i, mrk in enumerate(all_annot):
        abs_time = (start_time +
                    timedelta(seconds=mrk['start'])).strftime('%H:%M:%S')
        dur = timedelta(seconds=mrk['end'] - mrk['start'])
        duration = '{0:02d}.{1:03d}'.format(dur.seconds,
                                            round(dur.microseconds / 1000))

        item_time = QTableWidgetItem(abs_time)
        item_duration = QTableWidgetItem(duration)
        item_name = QTableWidgetItem(mrk['name'])
        if mrk in bookmarks:
            item_type = QTableWidgetItem('bookmark')
            color = self.parent.value('annot_bookmark_color')
        else:
            item_type = QTableWidgetItem('event')
            color = convert_name_to_color(mrk['name'])
        chan = mrk['chan']
        if isinstance(chan, (tuple, list)):
            chan = ', '.join(chan)
        item_chan = QTableWidgetItem(chan)

        item_time.setForeground(QColor(color))
        item_duration.setForeground(QColor(color))
        item_name.setForeground(QColor(color))
        item_type.setForeground(QColor(color))
        item_chan.setForeground(QColor(color))

        self.idx_annot_list.setItem(i, 0, item_time)
        self.idx_annot_list.setItem(i, 1, item_duration)
        self.idx_annot_list.setItem(i, 2, item_name)
        self.idx_annot_list.setItem(i, 3, item_type)
        self.idx_annot_list.setItem(i, 4, item_chan)

    # store information about the time as list (easy to access)
    annot_start = [ann['start'] for ann in all_annot]
    annot_end = [ann['end'] for ann in all_annot]
    annot_name = [ann['name'] for ann in all_annot]
    self.idx_annot_list.setProperty('start', annot_start)
    self.idx_annot_list.setProperty('end', annot_end)
    self.idx_annot_list.setProperty('name', annot_name)

    if self.parent.traces.data is not None:
        self.parent.traces.display_annotations()
    self.parent.overview.display_annotations()
def run(cmd, printOutput=False, exceptionOnError=False, warningOnError=True,
        parseForRegEx=None, regExFlags=0, printOutputIfParsed=False,
        printErrorsIfParsed=False, exceptionIfParsed=False, dryrun=None):
    """Run cmd and return (status,output,parsedOutput).

    Unless dryrun is set True, cmd is run using subprocess.Popen. It returns
    the exit status from cmd, its output, and any output found by searching
    for the regular expression parseForRegEx. cmd is logged at the debug
    level. The parameters are:

    cmd                  The command to be executed in either a string or a
                         sequence of program arguments. If cmd is a string
                         (str or unicode), it will be executed through a
                         shell (see subprocess for a discussion of
                         associated security risks).
    printOutput          Specifies if output is written to stdout.
    exceptionOnError     Specifies if a non-zero exit status of cmd should
                         raise an Exception.
    warningOnError       Specifies whether to log a warning in case of a
                         non-zero exit status of cmd.
    parseForRegEx        If not None, search cmd output for regular
                         expression.
    regExFlags           Any regular expression flags used with
                         parseForRegEx.
    printOutputIfParsed  Specifies if any matches found by parseForRegEx
                         should be printed to stdout.
    printErrorsIfParsed  Specifies if any matches found by parseForRegEx
                         should be logged as error.
    exceptionIfParsed    Specifies if an Exception should be raised if
                         parseForRegEx finds any matches.
    dryrun               If set True, cmd is only logged to debug and
                         nothing is run. Returns (0,'',None) in this case.
    """
    if dryrun:
        debug('would run cmd: %s' % cmd)
        return (0, '', None)
    debug('running cmd: %s' % cmd)
    outputBuffer = []
    interactive = os.environ.get('PYTHONINSPECT', None)
    if interactive:
        del os.environ['PYTHONINSPECT']
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         shell=isinstance(cmd, (str, unicode)),
                         universal_newlines=True)
    if interactive:
        os.environ['PYTHONINSPECT'] = interactive
    for line in iter(p.stdout.readline, ""):
        outputBuffer.append(line.strip('\n'))
        if printOutput:
            sys.stdout.write(line)
    status = p.wait()
    output = '\n'.join(outputBuffer)
    if status and exceptionOnError:
        raise Exception('Error %s running command: %s: %s'
                        % (status, cmd, output))
    if status and warningOnError:
        warning('Error %s running command: %s: %s' % (status, cmd, output))
    if parseForRegEx:
        regex = re.compile(parseForRegEx, regExFlags)
        parsedOutput = '\n'.join([s for s in outputBuffer if regex.search(s)])
    else:
        parsedOutput = None
    if printOutputIfParsed and parsedOutput and not printOutput:
        print output
    if printErrorsIfParsed and parsedOutput:
        for l in parsedOutput.strip().split('\n'):
            error('found in command output: %s' % l)
    if exceptionIfParsed and parsedOutput:
        raise Exception('Errors found in command output - please check')
    return (status, output, parsedOutput)
def is_fe_ready(self):
    '''Get FEI4 status of module.

    If FEI4 is not ready, resetting service records is necessary to bring
    the FEI4 to a defined state.

    Returns
    -------
    value : bool
        True if FEI4 is ready, False if the FEI4 was powered up recently
        and is not ready.
    '''
    with self.readout(fill_buffer=True, callback=None, errback=None):
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        commands.extend(self.register.get_commands("RdRegister", address=[1]))
        # commands.extend(self.register.get_commands("RunMode"))
        self.register_utils.send_commands(commands)
    data = self.read_data()
    if len(data) != 0:
        return True if FEI4Record(data[-1], self.register.chip_flavor) == 'VR' else False
    else:
        return False
def to_yaml(cls, representer, node):
    """How to serialize this class back to yaml."""
    return representer.represent_scalar(cls.yaml_tag, node.value)
def OnCut(self, event):
    """Clipboard cut event handler"""
    entry_line = \
        self.main_window.entry_line_panel.entry_line_panel.entry_line

    if wx.Window.FindFocus() != entry_line:
        selection = self.main_window.grid.selection
        with undo.group(_("Cut")):
            data = self.main_window.actions.cut(selection)
        self.main_window.clipboard.set_clipboard(data)
        self.main_window.grid.ForceRefresh()
    else:
        entry_line.Cut()

    event.Skip()
def reset_caches(self, **kwargs):
    """
    Called by ``__init__()`` to initialise the caches for a helper
    instance. It is also called by Django's ``setting_changed`` signal to
    clear the caches when changes to settings are made.

    Although it requires slightly more memory, separate dictionaries are
    used for raw values, models, modules and other objects to help with
    lookup performance for each type.
    """
    self._raw_cache = {}
    self._models_cache = {}
    self._modules_cache = {}
    self._objects_cache = {}
def to_list(self):
    """
    Set the current encoder output to :class:`giraffez.Row` objects and
    return the cursor. This is the default value, so it is not necessary
    to select this unless the encoder settings have been changed already.
    """
    self.conn.set_encoding(ROW_ENCODING_LIST)
    self.processor = lambda x, y: Row(x, y)
    return self
def gitrepo(cwd):
    """Return hash of Git data that can be used to display more information
    to users.

    Example:
        "git": {
            "head": {
                "id": "5e837ce92220be64821128a70f6093f836dd2c05",
                "author_name": "Wil Gieseler",
                "author_email": "wil@example.com",
                "committer_name": "Wil Gieseler",
                "committer_email": "wil@example.com",
                "message": "depend on simplecov >= 0.7"
            },
            "branch": "master",
            "remotes": [{
                "name": "origin",
                "url": "https://github.com/lemurheavy/coveralls-ruby.git"
            }]
        }

    From https://github.com/coagulant/coveralls-python (with MIT license).
    """
    repo = Repository(cwd)
    if not repo.valid():
        return {}
    return {
        'head': {
            'id': repo.gitlog('%H'),
            'author_name': repo.gitlog('%aN'),
            'author_email': repo.gitlog('%ae'),
            'committer_name': repo.gitlog('%cN'),
            'committer_email': repo.gitlog('%ce'),
            'message': repo.gitlog('%s')
        },
        'branch': os.environ.get(
            'TRAVIS_BRANCH',
            os.environ.get(
                'APPVEYOR_REPO_BRANCH',
                repo.git('rev-parse', '--abbrev-ref', 'HEAD')[1].strip())),
        'remotes': [{'name': line.split()[0], 'url': line.split()[1]}
                    for line in repo.git('remote', '-v')[1]
                    if '(fetch)' in line]
    }
def patch_related_object_descriptor_caching(ro_descriptor):
    """
    Patch SingleRelatedObjectDescriptor or ReverseSingleRelatedObjectDescriptor
    to use language-aware caching.
    """
    class NewSingleObjectDescriptor(LanguageCacheSingleObjectDescriptor,
                                    ro_descriptor.__class__):
        pass

    if django.VERSION[0] == 2:
        ro_descriptor.related.get_cache_name = partial(
            NewSingleObjectDescriptor.get_cache_name,
            ro_descriptor,
        )

    ro_descriptor.accessor = ro_descriptor.related.get_accessor_name()
    ro_descriptor.__class__ = NewSingleObjectDescriptor
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the data encoding the ProtocolVersion struct to a stream.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is not defined.
    """
    local_stream = utils.BytearrayStream()

    if self._major:
        self._major.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError(
            "Invalid struct missing the major protocol version number."
        )

    if self._minor:
        self._minor.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError(
            "Invalid struct missing the minor protocol version number."
        )

    self.length = local_stream.length()
    super(ProtocolVersion, self).write(
        output_stream,
        kmip_version=kmip_version
    )
    output_stream.write(local_stream.buffer)
def unpack_scalar(cls, dataset, data):
    """
    Given a dataset object and data in the appropriate format for
    the interface, return a simple scalar.
    """
    if (len(data.data_vars) == 1 and
            len(data[dataset.vdims[0].name].shape) == 0):
        return data[dataset.vdims[0].name].item()
    return data
def show_minimum_needs(self):
    """Show the minimum needs dialog."""
    # import here only so that it is AFTER i18n set up
    from safe.gui.tools.minimum_needs.needs_calculator_dialog import (
        NeedsCalculatorDialog
    )

    dialog = NeedsCalculatorDialog(self.iface.mainWindow())
    dialog.exec_()
def append_child_field(self, linenum, indent, field_name, field_value):
    """
    :param linenum: The line number of the frame.
    :type linenum: int
    :param indent: The indentation level of the frame.
    :type indent: int
    :param path:
    :type path: Path
    :param field_name:
    :type field_name: str
    :param field_value:
    :type field_value: str
    """
    frame = self.current_frame()
    assert isinstance(frame, RootFrame) or \
        isinstance(frame, ContainerFrame) and frame.indent < indent
    if frame.container.contains(ROOT_PATH, field_name):
        raise KeyError("field {0} exists in container at path {1}".format(
            field_name, frame.path))
    frame.container.put_field(ROOT_PATH, field_name, field_value)
    frame = FieldFrame(linenum, indent, frame.path, frame.container,
                       field_name, field_value)
    self.push_frame(frame)
def prsint(string):
    """
    Parse a string as an integer, encapsulating error handling.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/prsint_c.html

    :param string: String representing an integer.
    :type string: str
    :return: Integer value obtained by parsing string.
    :rtype: int
    """
    string = stypes.stringToCharP(string)
    intval = ctypes.c_int()
    libspice.prsint_c(string, ctypes.byref(intval))
    return intval.value
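# Usage sketch; this requires spiceypy's CSPICE shared library to be loaded,
# since the call is forwarded straight to prsint_c:
assert prsint('42') == 42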
def expression(callable, rule_name, grammar):
    """Turn a plain callable into an Expression.

    The callable can be of this simple form::

        def foo(text, pos):
            '''If this custom expression matches starting at text[pos],
            return the index where it stops matching. Otherwise, return
            None.'''
            if the expression matched:
                return end_pos

    If there are child nodes to return, return a tuple::

        return end_pos, children

    If the expression doesn't match at the given ``pos`` at all... ::

        return None

    If your callable needs to make sub-calls to other rules in the grammar
    or do error reporting, it can take this form, gaining additional
    arguments::

        def foo(text, pos, cache, error, grammar):
            # Call out to other rules:
            node = grammar['another_rule'].match_core(text, pos, cache, error)
            ...
            # Return values as above.

    The return value of the callable, if an int or a tuple, will be
    automatically transmuted into a :class:`~parsimonious.Node`. If it
    returns a Node-like class directly, it will be passed through unchanged.

    :arg rule_name: The rule name to attach to the resulting
        :class:`~parsimonious.Expression`
    :arg grammar: The :class:`~parsimonious.Grammar` this expression will be
        a part of, to make delegating to other rules possible

    """
    num_args = len(getargspec(callable).args)
    if num_args == 2:
        is_simple = True
    elif num_args == 5:
        is_simple = False
    else:
        raise RuntimeError("Custom rule functions must take either 2 or 5 "
                           "arguments, not %s." % num_args)

    class AdHocExpression(Expression):
        def _uncached_match(self, text, pos, cache, error):
            result = (callable(text, pos) if is_simple else
                      callable(text, pos, cache, error, grammar))

            if isinstance(result, integer_types):
                end, children = result, None
            elif isinstance(result, tuple):
                end, children = result
            else:
                # Node or None
                return result

            return Node(self, text, pos, end, children=children)

        def _as_rhs(self):
            return '{custom function "%s"}' % callable.__name__

    return AdHocExpression(name=rule_name)
def taubin(script, iterations=10, t_lambda=0.5, t_mu=-0.53, selected=False):
    """ The lambda & mu Taubin smoothing: it makes two steps of smoothing,
    forth and back, for each iteration.

    Based on:
    Gabriel Taubin
    "A signal processing approach to fair surface design"
    Siggraph 1995

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        iterations (int): The number of times that the taubin smoothing is
            iterated. Usually it requires a larger number of iteration than
            the classical laplacian.
        t_lambda (float): The lambda parameter of the Taubin Smoothing
            algorithm
        t_mu (float): The mu parameter of the Taubin Smoothing algorithm
        selected (bool): If selected the filter is performed only on the
            selected faces

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    filter_xml = ''.join([
        '  <filter name="Taubin Smooth">\n',
        '    <Param name="lambda" ',
        'value="{}" '.format(t_lambda),
        'description="Lambda" ',
        'type="RichFloat" ',
        '/>\n',
        '    <Param name="mu" ',
        'value="{}" '.format(t_mu),
        'description="mu" ',
        'type="RichFloat" ',
        '/>\n',
        '    <Param name="stepSmoothNum" ',
        'value="{:d}" '.format(iterations),
        'description="Smoothing steps" ',
        'type="RichInt" ',
        '/>\n',
        '    <Param name="Selected" ',
        'value="{}" '.format(str(selected).lower()),
        'description="Affect only selected faces" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    util.write_filter(script, filter_xml)
    return None
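# Hypothetical call: append twenty Taubin iterations to an existing
# FilterScript-like object (or script filename) named `my_script`:
taubin(my_script, iterations=20, t_lambda=0.5, t_mu=-0.53)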
def main(args=None):
    """Command line interface.

    :param list args: command line options (defaults to sys.argv)
    :returns: exit code
    :rtype: int
    """
    parser = ArgumentParser(
        prog='baseline',
        description=DESCRIPTION)

    parser.add_argument(
        'path', nargs='*',
        help='module or directory path')

    parser.add_argument(
        '--movepath',
        help='location to move script updates')

    parser.add_argument(
        '-w', '--walk', action='store_true',
        help='recursively walk directories')

    args = parser.parse_args(args)

    paths = args.path or ['.']
    paths = [path for pattern in paths for path in glob(pattern)]

    if args.walk:
        for dirpath in (p for p in paths if os.path.isdir(p)):
            for root, _dirs, files in os.walk(dirpath):
                paths += (os.path.join(root, filename) for filename in files)
    else:
        for dirpath in (p for p in paths if os.path.isdir(p)):
            paths += (os.path.join(dirpath, pth)
                      for pth in os.listdir(dirpath))

    update_paths = [
        os.path.relpath(p) for p in paths if p.lower().endswith(UPDATE_EXT)]

    exitcode = 0

    if update_paths:
        script_paths = [pth[:-len(UPDATE_EXT)] + '.py'
                        for pth in update_paths]

        print('Found updates for:')
        for path in script_paths:
            print('  ' + path)
        print()

        if not args.movepath:
            try:
                input('Hit [ENTER] to update, [Ctrl-C] to cancel ')
            except KeyboardInterrupt:
                print()
                print('Update canceled.')
                exitcode = 1
            else:
                print()

        if exitcode == 0:
            for script_path, update_path in zip(script_paths, update_paths):
                if args.movepath:
                    script_path = os.path.join(args.movepath, script_path)
                    if update_path.startswith('..'):
                        raise RuntimeError(
                            'destination outside of move path: '
                            + script_path)
                    script_dirpath = os.path.dirname(script_path)
                    if not os.path.isdir(script_dirpath):
                        os.makedirs(script_dirpath)

                with open(update_path) as update:
                    new_content = update.read()

                with open(script_path, 'w') as script:
                    script.write(new_content)

                os.remove(update_path)

                print(update_path + ' -> ' + script_path)

    return exitcode
def load_xml(self, filepath):
    """Loads the values of the configuration variables from an XML path."""
    from os import path
    import xml.etree.ElementTree as ET

    # Make sure the file exists and then import it as XML and read the
    # values out.
    uxpath = path.expanduser(filepath)
    if path.isfile(uxpath):
        tree = ET.parse(uxpath)
        vms("Parsing global settings from {}.".format(uxpath))
        root = tree.getroot()

        for child in root:
            if child.tag == "var":
                self._vardict[child.attrib["name"]] = child.attrib["value"]
def _find_relations(self):
    """Find all relevant relation elements and return them in a list."""
    # Get all extractions
    extractions = \
        list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]"))

    # Get relations from extractions
    relations = []
    for e in extractions:
        label_set = set(e.get('labels', []))
        # If this is a DirectedRelation
        if 'DirectedRelation' in label_set:
            self.relation_dict[e['@id']] = e
            subtype = e.get('subtype')
            if any(t in subtype for t in polarities.keys()):
                relations.append((subtype, e))
        # If this is an Event or an Entity
        if {'Event', 'Entity'} & label_set:
            self.concept_dict[e['@id']] = e

    if not relations and not self.relation_dict:
        logger.info("No relations found.")
    else:
        logger.info('%d relations of types %s found'
                    % (len(relations), ', '.join(polarities.keys())))
        logger.info('%d relations in dict.' % len(self.relation_dict))
        logger.info('%d concepts found.' % len(self.concept_dict))
    return relations
def from_credentials_db(client_secrets, storage, api_version="v3",
                        readonly=False, http_client=None, ga_hook=None):
    """Create a client for a web or installed application.

    Create a client with credentials stored in the stagecraft db.

    Args:
      client_secrets: dict, client secrets (downloadable from Google API
        Console)
      storage: stagecraft.apps.collectors.libs.ga.CredentialStorage, a
        Storage implementation to store credentials.
      readonly: bool, default False, if True only readonly access is
        requested from GA.
      http_client: httplib2.Http, Override the default http client used.
      ga_hook: function, a hook that is called every time a query is made
        against GA.
    """
    credentials = storage.get()
    return Client(_build(credentials, api_version, http_client), ga_hook)
def tgt_vocab(self):
    """Target Vocabulary of the Dataset.

    Returns
    -------
    tgt_vocab : Vocab
        Target vocabulary.
    """
    if self._tgt_vocab is None:
        tgt_vocab_file_name, tgt_vocab_hash = \
            self._data_file[self._pair_key]['vocab' + '_' + self._tgt_lang]
        [tgt_vocab_path] = self._fetch_data_path([(tgt_vocab_file_name,
                                                   tgt_vocab_hash)])
        with io.open(tgt_vocab_path, 'r', encoding='utf-8') as in_file:
            self._tgt_vocab = Vocab.from_json(in_file.read())
    return self._tgt_vocab
def notifications(self): """ Access the notifications :returns: twilio.rest.notify.v1.service.notification.NotificationList :rtype: twilio.rest.notify.v1.service.notification.NotificationList """ if self._notifications is None: self._notifications = NotificationList(self._version, service_sid=self._solution['sid'], ) return self._notifications
Access the notifications :returns: twilio.rest.notify.v1.service.notification.NotificationList :rtype: twilio.rest.notify.v1.service.notification.NotificationList
def file_arg(arg): """ Parses a file argument, i.e. starts with file:// """ prefix = 'file://' if arg.startswith(prefix): return os.path.abspath(arg[len(prefix):]) else: msg = 'Invalid file argument "{}", does not begin with "file://"' raise argparse.ArgumentTypeError(msg.format(arg))
Parses a file argument, i.e. starts with file://
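Because it raises argparse.ArgumentTypeError, file_arg drops straight into argparse as a type converter; a small sketch (the option name is illustrative):

import argparse
import os

parser = argparse.ArgumentParser()
# argparse calls file_arg on the raw string and reports the raised
# ArgumentTypeError as a usage error when the prefix is missing
parser.add_argument('--config', type=file_arg)

args = parser.parse_args(['--config', 'file://settings.ini'])
print(args.config)  # absolute path of settings.ini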
def serialize(self, data): """ Write a sequence of uniform hazard spectra to the specified file. :param data: Iterable of UHS data. Each datum must be an object with the following attributes: * imls: A sequence of Intensity Measure Levels * location: An object representing the location of the curve; must have `x` and `y` to represent lon and lat, respectively. """ gml_ns = nrml.SERIALIZE_NS_MAP['gml'] with open(self.dest, 'wb') as fh: root = et.Element('nrml') uh_spectra = et.SubElement(root, 'uniformHazardSpectra') _set_metadata(uh_spectra, self.metadata, _ATTR_MAP) periods_elem = et.SubElement(uh_spectra, 'periods') periods_elem.text = ' '.join([str(x) for x in self.metadata['periods']]) for uhs in data: uhs_elem = et.SubElement(uh_spectra, 'uhs') gml_point = et.SubElement(uhs_elem, '{%s}Point' % gml_ns) gml_pos = et.SubElement(gml_point, '{%s}pos' % gml_ns) gml_pos.text = '%s %s' % (uhs.location.x, uhs.location.y) imls_elem = et.SubElement(uhs_elem, 'IMLs') imls_elem.text = ' '.join(['%10.7E' % x for x in uhs.imls]) nrml.write(list(root), fh)
Write a sequence of uniform hazard spectra to the specified file. :param data: Iterable of UHS data. Each datum must be an object with the following attributes: * imls: A sequence of Intensity Measure Levels * location: An object representing the location of the curve; must have `x` and `y` to represent lon and lat, respectively.
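A hedged sketch of the datum shape serialize() expects; the namedtuples stand in for whatever classes the caller actually uses, and the writer construction is left commented because its constructor is not shown here:

from collections import namedtuple

# Minimal stand-ins satisfying the documented interface: an .imls sequence
# plus a .location with .x/.y attributes.
UHS = namedtuple('UHS', 'imls location')
Point = namedtuple('Point', 'x y')

data = [UHS(imls=[0.0098, 0.0219, 0.0461], location=Point(x=-122.4, y=37.8))]
# writer = UHSXMLWriter(dest='uhs.xml', metadata=metadata)  # hypothetical ctor
# writer.serialize(data)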
def add_hash(self, value): """Add a Node based on a precomputed, hex encoded, hash value. """ self.leaves.append(Node(codecs.decode(value, 'hex_codec'), prehashed=True))
Add a Node based on a precomputed, hex encoded, hash value.
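The 'hex_codec' decode used here is just the inverse of hex-encoding a digest; in isolation:

import codecs

digest_hex = '9f86d081884c7d65'  # truncated digest, purely illustrative
raw = codecs.decode(digest_hex, 'hex_codec')
print(raw == bytes.fromhex(digest_hex), len(raw))  # True 8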
async def reply_voice(self, voice: typing.Union[base.InputFile, base.String], caption: typing.Union[base.String, None] = None, duration: typing.Union[base.Integer, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_markup=None, reply=True) -> Message: """ Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS (other formats may be sent as Audio or Document). Source: https://core.telegram.org/bots/api#sendvoice :param voice: Audio file to send. :type voice: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Voice message caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the voice message in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message` """ return await self.bot.send_voice(chat_id=self.chat.id, voice=voice, caption=caption, duration=duration, disable_notification=disable_notification, reply_to_message_id=self.message_id if reply else None, reply_markup=reply_markup)
Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS (other formats may be sent as Audio or Document). Source: https://core.telegram.org/bots/api#sendvoice :param voice: Audio file to send. :type voice: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Voice message caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the voice message in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message`
def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None, content_encoding="aes128gcm", curl=False, timeout=None): """Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple """ # Encode the data. if headers is None: headers = dict() encoded = {} headers = CaseInsensitiveDict(headers) if data: encoded = self.encode(data, content_encoding) if "crypto_key" in encoded: # Append the p256dh to the end of any existing crypto-key crypto_key = headers.get("crypto-key", "") if crypto_key: # due to some confusion by a push service provider, we # should use ';' instead of ',' to append the headers. # see # https://github.com/webpush-wg/webpush-encryption/issues/6 crypto_key += ';' crypto_key += ( "dh=" + encoded["crypto_key"].decode('utf8')) headers.update({ 'crypto-key': crypto_key }) if "salt" in encoded: headers.update({ 'encryption': "salt=" + encoded['salt'].decode('utf8') }) headers.update({ 'content-encoding': content_encoding, }) if gcm_key: # guess if it is a legacy GCM project key or actual FCM key # gcm keys are all about 40 chars (use 100 for confidence), # fcm keys are 153-175 chars if len(gcm_key) < 100: endpoint = 'https://android.googleapis.com/gcm/send' else: endpoint = 'https://fcm.googleapis.com/fcm/send' reg_ids = [] if not reg_id: reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1] reg_ids.append(reg_id) gcm_data = dict() gcm_data['registration_ids'] = reg_ids if data: gcm_data['raw_data'] = base64.b64encode( encoded.get('body')).decode('utf8') gcm_data['time_to_live'] = int( headers['ttl'] if 'ttl' in headers else ttl) encoded_data = json.dumps(gcm_data) headers.update({ 'Authorization': 'key='+gcm_key, 'Content-Type': 'application/json', }) else: encoded_data = encoded.get('body') endpoint = self.subscription_info['endpoint'] if 'ttl' not in headers or ttl: headers['ttl'] = str(ttl or 0) # Additionally useful headers: # Authorization / Crypto-Key (VAPID headers) if curl: return self.as_curl(endpoint, encoded_data, headers) return self.requests_method.post(endpoint, data=encoded_data, headers=headers, timeout=timeout)
Encode and send the data to the Push Service. :param data: A serialized block of data (see encode() ). :type data: str :param headers: A dictionary containing any additional HTTP headers. :type headers: dict :param ttl: The Time To Live in seconds for this message if the recipient is not online. (Defaults to "0", which discards the message immediately if the recipient is unavailable.) :type ttl: int :param gcm_key: API key obtained from the Google Developer Console. Needed if endpoint is https://android.googleapis.com/gcm/send :type gcm_key: string :param reg_id: registration id of the recipient. If not provided, it will be extracted from the endpoint. :type reg_id: str :param content_encoding: ECE content encoding (defaults to "aes128gcm") :type content_encoding: str :param curl: Display output as `curl` command instead of sending :type curl: bool :param timeout: POST requests timeout :type timeout: float or tuple
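Assuming this method lives on pywebpush's WebPusher class (an assumption here), a typical call looks like the following; the endpoint and keys are truncated placeholders, not working values:

from pywebpush import WebPusher

# subscription_info as handed back by the browser's PushManager.subscribe()
subscription_info = {
    'endpoint': 'https://updates.push.services.mozilla.com/wpush/v2/gAAAA...',
    'keys': {'p256dh': 'BNcRd...', 'auth': 'tBHI...'},
}

pusher = WebPusher(subscription_info)
response = pusher.send(data='{"msg": "hello"}', ttl=60)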
def complete_io(self, iocb, msg): """Called by a handler to return data to the client.""" if _debug: IOController._debug("complete_io %r %r", iocb, msg) # if it completed, leave it alone if iocb.ioState == COMPLETED: pass # if it already aborted, leave it alone elif iocb.ioState == ABORTED: pass else: # change the state iocb.ioState = COMPLETED iocb.ioResponse = msg # notify the client iocb.trigger()
Called by a handler to return data to the client.
def validate_units(self): """Ensure that wavelength unit belongs to the correct class. There is no check for throughput because it is unitless. Raises ------ TypeError Wavelength unit is not `~pysynphot.units.WaveUnits`. """ if (not isinstance(self.waveunits, units.WaveUnits)): raise TypeError("%s is not a valid WaveUnit" % self.waveunits)
Ensure that wavelength unit belongs to the correct class. There is no check for throughput because it is unitless. Raises ------ TypeError Wavelength unit is not `~pysynphot.units.WaveUnits`.
def add_hook_sub(self, address, topics, callback): """Specify a *callback* in the same stream (thread) as the main receive loop. The callback will be called with the received messages from the specified subscription. Useful for operations that must be done in the same thread as the main receive loop (e.g. operations on the underlying sockets). """ LOGGER.info("Subscriber adding SUB hook %s for topics %s", str(address), str(topics)) socket = get_context().socket(SUB) for t__ in self._magickfy_topics(topics): socket.setsockopt_string(SUBSCRIBE, six.text_type(t__)) socket.connect(address) self._add_hook(socket, callback)
Specify a *callback* in the same stream (thread) as the main receive loop. The callback will be called with the received messages from the specified subscription. Useful for operations that must be done in the same thread as the main receive loop (e.g. operations on the underlying sockets).
def start(self): """ Starts the watchdog timer. """ self._timer = Timer(self.time, self.handler) self._timer.daemon = True self._timer.start() return
Starts the watchdog timer.
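A minimal self-contained version of the same pattern, assuming the surrounding class stores time and handler at construction:

from threading import Timer

class Watchdog:
    """Fires `handler` unless stop() is called within `time` seconds."""

    def __init__(self, time, handler):
        self.time = time
        self.handler = handler
        self._timer = None

    def start(self):
        self._timer = Timer(self.time, self.handler)
        self._timer.daemon = True
        self._timer.start()

    def stop(self):
        if self._timer is not None:
            self._timer.cancel()

wd = Watchdog(5.0, lambda: print('watchdog fired'))
wd.start()
wd.stop()  # cancelled before the five seconds elapse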
def use_options(allowed): """ Decorator that logs warnings when unpermitted options are passed into its wrapped function. Requires that wrapped function has a keyword-only argument named `options`. If wrapped function has {options} in its docstring, fills in with the docs for allowed options. Args: allowed (list str): list of option keys allowed. If the wrapped function is called with an option not in allowed, log a warning. All values in allowed must also be present in `defaults`. Returns: Wrapped function with options validation. >>> @use_options(['title']) ... def test(*, options={}): return options['title'] >>> test(options={'title': 'Hello'}) 'Hello' >>> # test(options={'not_allowed': 123}) # Also logs error message '' """ def update_docstring(f): _update_option_docstring(f, allowed) @functools.wraps(f) def check_options(*args, **kwargs): options = kwargs.get('options', {}) not_allowed = [ option for option in options if option not in allowed # Don't validate private options and not option.startswith('_') ] if not_allowed: logging.warning( 'The following options are not supported by ' 'this function and will likely result in ' 'undefined behavior: {}.'.format(not_allowed) ) return f(*args, **kwargs) return check_options return update_docstring
Decorator that logs warnings when unpermitted options are passed into its wrapped function. Requires that wrapped function has a keyword-only argument named `options`. If wrapped function has {options} in its docstring, fills in with the docs for allowed options. Args: allowed (list str): list of option keys allowed. If the wrapped function is called with an option not in allowed, log a warning. All values in allowed must also be present in `defaults`. Returns: Wrapped function with options validation. >>> @use_options(['title']) ... def test(*, options={}): return options['title'] >>> test(options={'title': 'Hello'}) 'Hello' >>> # test(options={'not_allowed': 123}) # Also logs error message ''
def record_manifest(self): """ Returns a dictionary representing a serialized state of the service. """ data = super(RabbitMQSatchel, self).record_manifest() params = sorted(list(self.get_user_vhosts())) # [(user, password, vhost)] data['rabbitmq_all_site_vhosts'] = params data['sites'] = list(self.genv.sites or []) return data
Returns a dictionary representing a serialized state of the service.
def metadata(self, exportFormat="default", output=None, saveFolder=None, fileName=None): """ exports metadata to the various supported formats Inputs: exportFormat - export metadata to the following formats: fgdc, inspire, iso19139, iso19139-3.2, iso19115, arcgis, and default. default means the value will be ISO 19139 Metadata Implementation Specification GML3.2 or the default format set for your ArcGIS online organizational account. output - html or none. Html returns values as html text. saveFolder - Default is None. If provided the metadata file will be saved to that location. fileName - Default is None. If provided, this will be the name of the file on your local drive. Output: path to file or string """ url = "%s/info/metadata/metadata.xml" % self.root allowedFormats = ["fgdc", "inspire", "iso19139", "iso19139-3.2", "iso19115", "arcgis", "default"] if not exportFormat.lower() in allowedFormats: raise Exception("Invalid exportFormat") if exportFormat.lower() == "arcgis": params = {} else: params = { "format" : exportFormat } if exportFormat.lower() == "default": exportFormat = "" params = { "format" : exportFormat } if output is not None: params['output'] = output if saveFolder is None: saveFolder = tempfile.gettempdir() if fileName is None: fileName = "metadata.xml" if output is None: result = self._get(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, out_folder=saveFolder, file_name=fileName) if not os.path.isfile(result): with open(os.path.join(saveFolder, fileName), 'wb') as writer: writer.write(result) writer.flush() writer.close() return os.path.join(saveFolder, fileName) return result else: return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
exports metadata to the various supported formats Inputs: exportFormat - export metadata to the following formats: fgdc, inspire, iso19139, iso19139-3.2, iso19115, arcgis, and default. default means the value will be ISO 19139 Metadata Implementation Specification GML3.2 or the default format set for your ArcGIS online organizational account. output - html or none. Html returns values as html text. saveFolder - Default is None. If provided the metadata file will be saved to that location. fileName - Default is None. If provided, this will be the name of the file on your local drive. Output: path to file or string
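Assuming the method hangs off an already-authenticated portal item object (the item variable below is hypothetical), saving ISO 19139 metadata locally might look like:

# `item` is a hypothetical authenticated item handle exposing .metadata()
path = item.metadata(exportFormat='iso19139',
                     saveFolder='/tmp/meta',
                     fileName='item_metadata.xml')
print(path)  # /tmp/meta/item_metadata.xml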
def create(cls, datacenter, memory, cores, ip_version, bandwidth, login, password, hostname, image, run, background, sshkey, size, vlan, ip, script, script_args, ssh): """Create a new virtual machine.""" from gandi.cli.modules.network import Ip, Iface if not background and not cls.intty(): background = True datacenter_id_ = int(Datacenter.usable_id(datacenter)) if not hostname: hostname = randomstring('vm') disk_name = 'sys_%s' % hostname[2:] else: disk_name = 'sys_%s' % hostname.replace('.', '') vm_params = { 'hostname': hostname, 'datacenter_id': datacenter_id_, 'memory': memory, 'cores': cores, } if login: vm_params['login'] = login if run: vm_params['run'] = run if password: vm_params['password'] = password if ip_version: vm_params['ip_version'] = ip_version vm_params['bandwidth'] = bandwidth if script: with open(script) as fd: vm_params['script'] = fd.read() if script_args: vm_params['script_args'] = script_args vm_params.update(cls.convert_sshkey(sshkey)) # XXX: name of disk is limited to 15 chars in ext2fs, ext3fs # but api allow 255, so we limit to 15 for now disk_params = {'datacenter_id': vm_params['datacenter_id'], 'name': disk_name[:15]} if size: if isinstance(size, tuple): prefix, size = size disk_params['size'] = size sys_disk_id_ = int(Image.usable_id(image, datacenter_id_)) ip_summary = [] if ip_version == 4: ip_summary = ['v4', 'v6'] elif ip_version == 6: ip_summary = ['v6'] if vlan: ip_ = None ip_summary.append('private') if ip: try: ip_ = Ip.info(ip) except Exception: pass else: if not Ip._check_and_detach(ip_, None): return if ip_: iface_id = ip_['iface_id'] else: ip_create = Ip.create(4, vm_params['datacenter_id'], bandwidth, None, vlan, ip) iface_id = ip_create['iface_id'] # if there is a public ip, will attach this one later, else give # the iface to vm.create if not ip_version: vm_params['iface_id'] = iface_id result = cls.call('hosting.vm.create_from', vm_params, disk_params, sys_disk_id_) cls.echo('* Configuration used: %d cores, %dMb memory, ip %s, ' 'image %s, hostname: %s, datacenter: %s' % (cores, memory, '+'.join(ip_summary), image, hostname, datacenter)) # background mode, bail out now (skip interactive part) if background and (not vlan or not ip_version): return result # interactive mode, run a progress bar cls.echo('Creating your Virtual Machine %s.' % hostname) cls.display_progress(result) cls.echo('Your Virtual Machine %s has been created.' % hostname) vm_id = None for oper in result: if oper.get('vm_id'): vm_id = oper.get('vm_id') break if vlan and ip_version: attach = Iface._attach(iface_id, vm_id) if background: return attach if 'ssh_key' not in vm_params and 'keys' not in vm_params: return if vm_id and ip_version: cls.wait_for_sshd(vm_id) if ssh: cls.ssh_keyscan(vm_id) cls.ssh(vm_id, 'root', None)
Create a new virtual machine.
def _get_core_keywords(skw_matches, ckw_matches, spires=False): """Return the output for the field codes. :var skw_matches: dict of {keyword: [info,...]} :var ckw_matches: dict of {keyword: [info,...]} :keyword spires: bool, to get the spires output :return: list of formatted core keywords """ output = {} category = {} def _get_value_kw(kw): """Help to sort the Core keywords.""" i = 0 while kw[i].isdigit(): i += 1 if i > 0: return int(kw[:i]) else: return 0 for skw, info in skw_matches: if skw.core: output[skw.output(spires)] = len(info[0]) category[skw.output(spires)] = skw.type for ckw, info in ckw_matches: if ckw.core: output[ckw.output(spires)] = len(info[0]) else: # test if one of the components is not core i = 0 for c in ckw.getComponents(): if c.core: output[c.output(spires)] = info[1][i] i += 1 output = [{'keyword': key, 'number': value} for key, value in output.iteritems()] return sorted(output, key=lambda x: x['number'], reverse=True)
Return the output for the field codes. :var skw_matches: dict of {keyword: [info,...]} :var ckw_matches: dict of {keyword: [info,...]} :keyword spires: bool, to get the spires output :return: list of formatted core keywords
def _stub_obj(obj): ''' Stub an object directly. ''' # Annoying circular reference requires importing here. Would like to see # this cleaned up. @AW from .mock import Mock # Return an existing stub if isinstance(obj, Stub): return obj # If a Mock object, stub its __call__ if isinstance(obj, Mock): return stub(obj.__call__) # If passed-in a type, assume that we're going to stub out the creation. # See StubNew for the awesome sauce. # if isinstance(obj, types.TypeType): if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType): return StubNew(obj) elif hasattr(__builtins__, 'type') and \ isinstance(obj, __builtins__.type): return StubNew(obj) elif inspect.isclass(obj): return StubNew(obj) # I thought that types.UnboundMethodType differentiated these cases but # apparently not. if isinstance(obj, types.MethodType): # Handle differently if unbound because it's an implicit "any instance" if getattr(obj, 'im_self', None) is None: # Handle the python3 case and py2 filter if hasattr(obj, '__self__'): if obj.__self__ is not None: return StubMethod(obj) if sys.version_info.major == 2: return StubUnboundMethod(obj) else: return StubMethod(obj) # These aren't in the types library if type(obj).__name__ == 'method-wrapper': return StubMethodWrapper(obj) if type(obj).__name__ == 'wrapper_descriptor': raise UnsupportedStub( "must call stub(obj,'%s') for slot wrapper on %s", obj.__name__, obj.__objclass__.__name__) # (Mostly) Lastly, look for properties. # First look for the situation where there's a reference back to the # property. prop = obj if isinstance(getattr(obj, '__self__', None), property): obj = prop.__self__ # Once we've found a property, we have to figure out how to reference # back to the owning class. This is a giant pain and we have to use gc # to find out where it comes from. This code is dense but resolves to # something like this: # >>> gc.get_referrers( foo.x ) # [{'__dict__': <attribute '__dict__' of 'foo' objects>, # 'x': <property object at 0x7f68c99a16d8>, # '__module__': '__main__', # '__weakref__': <attribute '__weakref__' of 'foo' objects>, # '__doc__': None}] if isinstance(obj, property): klass, attr = None, None for ref in gc.get_referrers(obj): if klass and attr: break if isinstance(ref, dict) and ref.get('prop', None) is obj: klass = getattr( ref.get('__dict__', None), '__objclass__', None) for name, val in getattr(klass, '__dict__', {}).items(): if val is obj: attr = name break # In the case of PyPy, we have to check all types that refer to # the property, and see if any of their attrs are the property elif isinstance(ref, type): # Use dir as a means to quickly walk through the class tree for name in dir(ref): if getattr(ref, name) == obj: klass = ref attr = name break if klass and attr: rval = stub(klass, attr) if prop != obj: return stub(rval, prop.__name__) return rval # If a function and it has an associated module, we can mock directly. # Note that this *must* be after properties, otherwise it conflicts with # stubbing out the deleter methods and such # Sadly, builtin functions and methods have the same type, so we have to # use the same stub class even though it's a bit ugly if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType, types.BuiltinMethodType)) and hasattr(obj, '__module__'): return StubFunction(obj) raise UnsupportedStub("can't stub %s", obj)
Stub an object directly.
def replicate(ctx, args): """Make a node the slave of a master. """ slave = ClusterNode.from_uri(args.node) master = ClusterNode.from_uri(args.master) if not master.is_master(): ctx.abort("Node {!r} is not a master.".format(args.master)) try: slave.replicate(master.name) except redis.ResponseError as e: ctx.abort(str(e)) Cluster.from_node(master).wait()
Make a node the slave of a master.
def map_structprop_resnums_to_seqprop_resnums(self, resnums, structprop=None, chain_id=None, seqprop=None, use_representatives=False): """Map a residue number in any StructProp + chain ID to any SeqProp's residue number. Args: resnums (int, list): Residue numbers in the structure structprop (StructProp): StructProp object chain_id (str): Chain ID to map from seqprop (SeqProp): SeqProp object use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop, structprop, and chain_id do not need to be defined. Returns: dict: Mapping of structure residue numbers to sequence residue numbers """ resnums = ssbio.utils.force_list(resnums) if use_representatives: seqprop = self.representative_sequence structprop = self.representative_structure chain_id = self.representative_chain if not structprop: raise ValueError('No representative structure set, please specify sequence, structure, and chain ID') else: if not seqprop or not structprop or not chain_id: raise ValueError('Please specify sequence, structure, and chain ID') if structprop.id == self.representative_structure.id: full_structure_id = '{}-{}'.format(structprop.id, chain_id).replace('REP-', '') else: full_structure_id = '{}-{}'.format(structprop.id, chain_id) aln_id = '{}_{}'.format(seqprop.id, full_structure_id) access_key = '{}_chain_index'.format(aln_id) if access_key not in seqprop.letter_annotations: raise KeyError( '{}: structure mapping {} not available in sequence letter annotations. Was alignment parsed? ' 'Run ``align_seqprop_to_structprop`` with ``parse=True``.'.format(access_key, aln_id)) chain = structprop.chains.get_by_id(chain_id) chain_structure_resnum_mapping = chain.seq_record.letter_annotations['structure_resnums'] final_mapping = {} for resnum in resnums: resnum = int(resnum) resnum_index = chain_structure_resnum_mapping.index(resnum) struct_res_singleaa = structprop.chains.get_by_id(chain_id).seq_record[resnum_index] # if resnum not in seqprop.letter_annotations[access_key]: # log.warning('{}-{} -> {}: unable to map residue {} from structure to sequence, ' # 'skipping'.format(structprop.id, chain_id, seqprop.id, resnum)) # continue what = seqprop.letter_annotations[access_key].index(resnum_index+1) # TODO in progress... seq_res_singleaa = seqprop[what] sp_resnum = what + 1 final_mapping[resnum] = sp_resnum # Additionally report if residues are the same - they could be different in the structure though format_data = {'seqprop_id' : seqprop.id, 'seqprop_resid' : seq_res_singleaa, 'seqprop_resnum' : sp_resnum, 'structprop_id' : structprop.id, 'structprop_chid' : chain_id, 'structprop_resid' : struct_res_singleaa, 'structprop_resnum': resnum} if struct_res_singleaa != seq_res_singleaa: log.warning('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} does not match to ' 'structure {structprop_id}-{structprop_chid} residue ' '{structprop_resid}{structprop_resnum}. NOTE: this may be due to ' 'structural differences'.format(**format_data)) else: log.debug('Sequence {seqprop_id} residue {seqprop_resid}{seqprop_resnum} is mapped to ' 'structure {structprop_id}-{structprop_chid} residue ' '{structprop_resid}{structprop_resnum}'.format(**format_data)) return final_mapping
Map a residue number in any StructProp + chain ID to any SeqProp's residue number. Args: resnums (int, list): Residue numbers in the structure structprop (StructProp): StructProp object chain_id (str): Chain ID to map from seqprop (SeqProp): SeqProp object use_representatives (bool): If the representative sequence and structure should be used. If True, seqprop, structprop, and chain_id do not need to be defined. Returns: dict: Mapping of structure residue numbers to sequence residue numbers
def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partition. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func)
Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partition. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
def infer_newX(self, Y_new, optimize=True): """ Infer X for the new observed data *Y_new*. :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the posterior estimation of X and the model that optimize X :rtype: (:class:`~GPy.core.parameterization.variational.VariationalPosterior` and numpy.ndarray, :class:`~GPy.core.model.Model`) """ from ..inference.latent_function_inference.inferenceX import infer_newX return infer_newX(self, Y_new, optimize=optimize)
Infer X for the new observed data *Y_new*. :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the posterior estimation of X and the model that optimize X :rtype: (:class:`~GPy.core.parameterization.variational.VariationalPosterior` and numpy.ndarray, :class:`~GPy.core.model.Model`)
def parse_meta(content, meta_name, source_descr, content_attr_name="content"): """ Return list of strings parsed from `content` attribute from ``<meta>`` tags with given `meta_name`. """ dom = dhtmlparser.parseString(content) meta_tags = dom.find( "meta", fn=lambda x: x.params.get("name", "").lower() == meta_name.lower() ) return [ SourceString(tag.params[content_attr_name], source_descr) for tag in meta_tags if content_attr_name in tag.params ]
Return list of strings parsed from `content` attribute from ``<meta>`` tags with given `meta_name`.
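A quick usage sketch; note the case-insensitive match on the name attribute:

html = '<html><head><meta name="Description" content="A short summary."></head></html>'
found = parse_meta(html, meta_name="description", source_descr="homepage")
# `found` holds one SourceString carrying "A short summary.", tagged
# with the source description "homepage"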
def license_loader(lic_dir=LIC_DIR): """Loads licenses from the given directory.""" lics = [] for ln in os.listdir(lic_dir): lp = os.path.join(lic_dir, ln) with open(lp) as lf: txt = lf.read() lic = License(txt) lics.append(lic) return lics
Loads licenses from the given directory.
def formatted_filters(self): """ Cache and return filters as a comprehensive WQL clause. """ if not self._formatted_filters: filters = deepcopy(self.filters) self._formatted_filters = self._format_filter(filters, self._and_props) return self._formatted_filters
Cache and return filters as a comprehensive WQL clause.
def to_json_file(graph: BELGraph, file: TextIO, **kwargs) -> None: """Write this graph as Node-Link JSON to a file.""" graph_json_dict = to_json(graph) json.dump(graph_json_dict, file, ensure_ascii=False, **kwargs)
Write this graph as Node-Link JSON to a file.
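Since **kwargs is forwarded to json.dump, pretty-printing options pass straight through; `graph` below is assumed to be an existing BELGraph:

# `graph` is assumed to be a BELGraph built elsewhere
with open('graph.json', 'w') as file:
    to_json_file(graph, file, indent=2, sort_keys=True)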
def _headers(self): """Ensure the Authorization Header has a valid Access Token.""" if not self.access_token or not self.access_token_expires: self._basic_login() elif datetime.now() > self.access_token_expires - timedelta(seconds=30): self._basic_login() return {'Accept': HEADER_ACCEPT, 'Authorization': 'bearer ' + self.access_token}
Ensure the Authorization Header has a valid Access Token.
def nodeids(self, ivs=None, quantifier=None): """ Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both """ if ivs is None: nids = list(self._nodeids) else: _vars = self._vars nids = [] for iv in ivs: if iv in _vars and IVARG_ROLE in _vars[iv]['refs']: nids.extend(_vars[iv]['refs'][IVARG_ROLE]) else: raise KeyError(iv) if quantifier is not None: nids = [n for n in nids if self.ep(n).is_quantifier()==quantifier] return nids
Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both
def rados_df(self, host_list=None, remote_user=None, remote_pass=None): ''' Invoke the rados df command and return the parsed output to the user ''' result, failed_hosts = self.runner.ansible_perform_operation( host_list=host_list, remote_user=remote_user, remote_pass=remote_pass, module="command", module_args="rados df") parsed_result = self.rados_parse_df(result) return parsed_result
Invoke the rados df command and return the parsed output to the user
def validate(self, options): """ Validate the options or exit() """ try: codecs.getencoder(options.char_encoding) except LookupError: self.parser.error("invalid 'char-encoding' %s" % options.char_encoding)
Validate the options or exit()
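The same codecs lookup works standalone for vetting encoding names up front:

import codecs

for name in ('utf-8', 'latin-1', 'not-a-codec'):
    try:
        codecs.getencoder(name)
        print(name, '-> ok')
    except LookupError:
        print(name, '-> invalid')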
def _read_http_settings(self, size, kind, flag): """Read HTTP/2 SETTINGS frames. Structure of HTTP/2 SETTINGS frame [RFC 7540]: +-----------------------------------------------+ | Length (24) | +---------------+---------------+---------------+ | Type (8) | Flags (8) | +-+-------------+---------------+-------------------------------+ |R| Stream Identifier (31) | +---------------------------------------------------------------+ | Identifier (16) | +-------------------------------+-------------------------------+ | Value (32) | +---------------------------------------------------------------+ | ...... | Octets Bits Name Description 0 0 http.length Length 3 24 http.type Type (2) 4 32 http.flags Flags 5 40 - Reserved 5 41 http.sid Stream Identifier 9 72 http.settings Settings 9 72 http.settings.id Identifier 10 80 http.settings.value Value """ # each settings entry is a 16-bit identifier plus a 32-bit value, # i.e. six octets per entry [RFC 7540] if size % 6 != 0: raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True) _flag = dict( ACK=False, # bit 0 ) for index, bit in enumerate(flag): if index == 0 and bit: _flag['ACK'] = True elif bit: raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True) else: continue if _flag['ACK'] and size: raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True) _para = dict() counter = 0 while counter < size: _stid = self._read_unpack(2) _pval = self._read_unpack(4) _pkey = _PARA_NAME.get(_stid, 'Unsigned') if _pkey in _para: if isinstance(_para[_pkey], tuple): _para[_pkey] += (_pval,) else: _para[_pkey] = (_para[_pkey], _pval) else: _para[_pkey] = _pval counter += 6 # advance past the six-octet entry just read data = dict( flags=_flag, ) data.update(_para) return data
Read HTTP/2 SETTINGS frames. Structure of HTTP/2 SETTINGS frame [RFC 7540]: +-----------------------------------------------+ | Length (24) | +---------------+---------------+---------------+ | Type (8) | Flags (8) | +-+-------------+---------------+-------------------------------+ |R| Stream Identifier (31) | +---------------------------------------------------------------+ | Identifier (16) | +-------------------------------+-------------------------------+ | Value (32) | +---------------------------------------------------------------+ | ...... | Octets Bits Name Description 0 0 http.length Length 3 24 http.type Type (2) 4 32 http.flags Flags 5 40 - Reserved 5 41 http.sid Stream Identifier 9 72 http.settings Settings 9 72 http.settings.id Identifier 10 80 http.settings.value Value
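Per the layout above, each settings entry is a 16-bit identifier followed by a 32-bit value, six octets in total; decoding one by hand:

import struct

# SETTINGS_MAX_CONCURRENT_STREAMS (identifier 0x0003) set to 100,
# exactly as the six octets appear in a frame payload (big-endian).
entry = bytes.fromhex('0003' '00000064')
identifier, value = struct.unpack('!HI', entry)
print(identifier, value)  # 3 100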
def access_storage_list(**kwargs): """ Shows collections with ACL. """ ctx = Context(**kwargs) ctx.execute_action('access:storage:list', **{ 'storage': ctx.repo.create_secure_service('storage'), })
Shows collections with ACL.
def get_current_thread_id(thread): ''' Note: the difference from get_current_thread_id to get_thread_id is that for the current thread we can get the thread id while the thread.ident is still not set in the Thread instance. ''' try: # Fast path without getting lock. tid = thread.__pydevd_id__ if tid is None: # Fix for https://www.brainwy.com/tracker/PyDev/645 # if __pydevd_id__ is None, recalculate it... also, use an heuristic # that gives us always the same id for the thread (using thread.ident or id(thread)). raise AttributeError() except AttributeError: tid = _get_or_compute_thread_id_with_lock(thread, is_current_thread=True) return tid
Note: the difference from get_current_thread_id to get_thread_id is that for the current thread we can get the thread id while the thread.ident is still not set in the Thread instance.
def metadata_and_cell_to_header(notebook, metadata, text_format, ext): """ Return the text header corresponding to a notebook, and remove the first cell of the notebook if it contained the header """ header = [] lines_to_next_cell = None if notebook.cells: cell = notebook.cells[0] if cell.cell_type == 'raw': lines = cell.source.strip('\n\t ').splitlines() if len(lines) >= 2 \ and _HEADER_RE.match(lines[0]) \ and _HEADER_RE.match(lines[-1]): header = lines[1:-1] lines_to_next_cell = cell.metadata.get('lines_to_next_cell') notebook.cells = notebook.cells[1:] metadata = insert_jupytext_info_and_filter_metadata(metadata, ext, text_format) if metadata: header.extend(yaml.safe_dump({'jupyter': metadata}, default_flow_style=False).splitlines()) if header: header = ['---'] + header + ['---'] return comment_lines(header, text_format.header_prefix), lines_to_next_cell
Return the text header corresponding to a notebook, and remove the first cell of the notebook if it contained the header
def destroy(self, request, pk=None, parent_lookup_organization=None): '''Remove a user from an organization.''' user = get_object_or_404(User, pk=pk) org = get_object_or_404( SeedOrganization, pk=parent_lookup_organization) self.check_object_permissions(request, org) org.users.remove(user) return Response(status=status.HTTP_204_NO_CONTENT)
Remove a user from an organization.
def frequency(self, mapping): """ Returns frequency of a given :class:`caspo.core.mapping.Mapping` Parameters ---------- mapping : :class:`caspo.core.mapping.Mapping` A logical conjunction mapping Returns ------- float Frequency of the given mapping over all logical networks Raises ------ ValueError If the given mapping is not found in the mappings of the underlying hypergraph of this list """ return self.__matrix[:, self.hg.mappings[mapping]].mean()
Returns frequency of a given :class:`caspo.core.mapping.Mapping` Parameters ---------- mapping : :class:`caspo.core.mapping.Mapping` A logical conjunction mapping Returns ------- float Frequency of the given mapping over all logical networks Raises ------ ValueError If the given mapping is not found in the mappings of the underlying hypergraph of this list
def err(r): """ Input: { return - return code error - error text } Output: Nothing; quits program """ import sys rc=r['return'] re=r['error'] out('Error: '+re) sys.exit(rc)
Input: { return - return code error - error text } Output: Nothing; quits program
def _gen_form_data(self) -> multipart.MultipartWriter: """Encode a list of fields using the multipart/form-data MIME format""" for dispparams, headers, value in self._fields: try: if hdrs.CONTENT_TYPE in headers: part = payload.get_payload( value, content_type=headers[hdrs.CONTENT_TYPE], headers=headers, encoding=self._charset) else: part = payload.get_payload( value, headers=headers, encoding=self._charset) except Exception as exc: raise TypeError( 'Can not serialize value type: %r\n ' 'headers: %r\n value: %r' % ( type(value), headers, value)) from exc if dispparams: part.set_content_disposition( 'form-data', quote_fields=self._quote_fields, **dispparams ) # FIXME cgi.FieldStorage doesn't likes body parts with # Content-Length which were sent via chunked transfer encoding assert part.headers is not None part.headers.popall(hdrs.CONTENT_LENGTH, None) self._writer.append_payload(part) return self._writer
Encode a list of fields using the multipart/form-data MIME format
def write_outro (self): """Write outro comments.""" self.stoptime = time.time() duration = self.stoptime - self.starttime self.comment(_("Stopped checking at %(time)s (%(duration)s)") % {"time": strformat.strtime(self.stoptime), "duration": strformat.strduration_long(duration)})
Write outro comments.
def member_ids(self): """Members of this group.""" info = self.raw.get(ATTR_MEMBERS, {}) if not info or ROOT_DEVICES2 not in info: return [] return info[ROOT_DEVICES2].get(ATTR_ID, [])
Members of this group.
def guess_depth(self, root_dir): """ Try to guess the depth of a directory repository (i.e. whether it has sub-folders for multiple subjects or visits), depending on where files and/or derived label files are found in the hierarchy of sub-directories under the root dir. Parameters ---------- root_dir : str Path to the root directory of the repository """ deepest = -1 for path, dirs, files in os.walk(root_dir): depth = self.path_depth(path) filtered_files = self._filter_files(files, path) if filtered_files: logger.info("Guessing depth of directory repository at '{}' is" " {} due to unfiltered files ('{}') in '{}'" .format(root_dir, depth, "', '".join(filtered_files), path)) return depth if self.PROV_DIR in dirs: depth_to_return = max(depth - 1, 0) logger.info("Guessing depth of directory repository at '{}' is" " {} due to \"Derived label file\" in '{}'" .format(root_dir, depth_to_return, path)) return depth_to_return if depth >= self.MAX_DEPTH: logger.info("Guessing depth of directory repository at '{}' is" " {} as '{}' is already at maximum depth" .format(root_dir, self.MAX_DEPTH, path)) return self.MAX_DEPTH try: for fpath in chain(filtered_files, self._filter_dirs(dirs, path)): Fileset.from_path(fpath) except ArcanaError: pass else: if depth > deepest: deepest = depth if deepest == -1: raise ArcanaRepositoryError( "Could not guess depth of '{}' repository as did not find " "a valid session directory within sub-directories." .format(root_dir)) return deepest
Try to guess the depth of a directory repository (i.e. whether it has sub-folders for multiple subjects or visits), depending on where files and/or derived label files are found in the hierarchy of sub-directories under the root dir. Parameters ---------- root_dir : str Path to the root directory of the repository
def astype(self, dtype): """Returns a view that does on the fly type conversion of the underlying data. Parameters ---------- dtype : string or dtype NumPy dtype. Notes ----- This method returns a new Array object which is a view on the same underlying chunk data. Modifying any data via the view is currently not permitted and will result in an error. This is an experimental feature and its behavior is subject to change in the future. See Also -------- Array.view Examples -------- >>> import zarr >>> import numpy as np >>> data = np.arange(100, dtype=np.uint8) >>> a = zarr.array(data, chunks=10) >>> a[:] array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], dtype=uint8) >>> v = a.astype(np.float32) >>> v.is_view True >>> v[:] array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.], dtype=float32) """ dtype = np.dtype(dtype) filters = [] if self._filters: filters.extend(self._filters) filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype)) return self.view(filters=filters, dtype=dtype, read_only=True)
Returns a view that does on the fly type conversion of the underlying data. Parameters ---------- dtype : string or dtype NumPy dtype. Notes ----- This method returns a new Array object which is a view on the same underlying chunk data. Modifying any data via the view is currently not permitted and will result in an error. This is an experimental feature and its behavior is subject to change in the future. See Also -------- Array.view Examples -------- >>> import zarr >>> import numpy as np >>> data = np.arange(100, dtype=np.uint8) >>> a = zarr.array(data, chunks=10) >>> a[:] array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], dtype=uint8) >>> v = a.astype(np.float32) >>> v.is_view True >>> v[:] array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.], dtype=float32)
def _plot_connectivity_helper(self, ii, ji, mat_datai, data, lims=[1, 8]): """ A debug function used to plot the adjacency/connectivity matrix. """ from matplotlib.pyplot import quiver, colorbar, clim, matshow I = ~np.isnan(mat_datai) & (ji != -1) & (mat_datai >= 0) mat_data = mat_datai[I] j = ji[I] i = ii[I] x = i.astype(float) % data.shape[1] y = i.astype(float) // data.shape[1] x1 = (j.astype(float) % data.shape[1]).ravel() y1 = (j.astype(float) // data.shape[1]).ravel() nx = (x1 - x) ny = (y1 - y) matshow(data, cmap='gist_rainbow'); colorbar(); clim(lims) quiver(x, y, nx, ny, mat_data.ravel(), angles='xy', scale_units='xy', scale=1, cmap='bone') colorbar(); clim([0, 1])
A debug function used to plot the adjacency/connectivity matrix.
def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs): """ Clone variables unused by the attack on all GPUs. Specifically, the ground-truth label, y, has to be preserved until the training step. :param inputs: A list of dictionaries as the inputs to each step. :param outputs: A list of dictionaries as the outputs of each step. :param g0_inputs: Initial variables to be cloned. :return: Updated inputs and outputs. """ assert len(inputs) == len(outputs), ( 'Inputs and outputs should have the same number of elements.') inputs[0].update(g0_inputs) outputs[0].update(g0_inputs) # Copy g0_inputs forward for i in range(1, len(inputs)): # Create the graph for i'th step of attack device_name = inputs[i]['x'].device with tf.device(device_name): with tf.variable_scope('step%d' % i): for k, v in g0_inputs.iteritems(): if k not in inputs[i]: v_copy = clone_variable(k, v) inputs[i][k] = v_copy outputs[i][k] = v_copy return inputs, outputs
Clone variables unused by the attack on all GPUs. Specifically, the ground-truth label, y, has to be preserved until the training step. :param inputs: A list of dictionaries as the inputs to each step. :param outputs: A list of dictionaries as the outputs of each step. :param g0_inputs: Initial variables to be cloned. :return: Updated inputs and outputs.
def bit_size(self): """ :return: The bit size of the private key, as an integer """ if self._bit_size is None: if self.algorithm == 'rsa': prime = self['private_key'].parsed['modulus'].native elif self.algorithm == 'dsa': prime = self['private_key_algorithm']['parameters']['p'].native elif self.algorithm == 'ec': prime = self['private_key'].parsed['private_key'].native self._bit_size = int(math.ceil(math.log(prime, 2))) modulus = self._bit_size % 8 if modulus != 0: self._bit_size += 8 - modulus return self._bit_size
:return: The bit size of the private key, as an integer
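The closing arithmetic pads the raw bit length up to a whole byte; the same steps in isolation:

import math

prime = 1000003  # stand-in for the key's modulus / p / private value
bits = int(math.ceil(math.log(prime, 2)))  # 20 raw bits for this value
if bits % 8:
    bits += 8 - bits % 8                   # round up to a byte boundary
print(bits)  # 24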
def destroy_list(self, list_id): """ Destroy a list :param list_id: list ID number :return: The destroyed list object :rtype: :class:`~responsebot.models.List` """ return List(tweepy_list_to_json(self._client.destroy_list(list_id=list_id)))
Destroy a list :param list_id: list ID number :return: The destroyed list object :rtype: :class:`~responsebot.models.List`
def _compose_restart(services): """Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318""" def _restart_container(client, container): log_to_client('Restarting {}'.format(get_canonical_container_name(container))) client.restart(container['Id'], timeout=1) assembled_specs = get_assembled_specs() if services == []: services = [spec.name for spec in assembled_specs.get_apps_and_services()] logging.info('Restarting service containers from list: {}'.format(services)) client = get_docker_client() for service in services: container = get_container_for_app_or_service(service, include_exited=True) if container is None: log_to_client('No container found for {}'.format(service)) continue stopped_linked_containers = _check_stopped_linked_containers(container, assembled_specs) if stopped_linked_containers: log_to_client('No running containers {0}, which are linked to by {1}. Cannot restart {1}'.format( stopped_linked_containers, service)) else: _restart_container(client, container)
Well, this is annoying. Compose 1.2 shipped with the restart functionality fucking broken, so we can't set a faster timeout than 10 seconds (which is way too long) using Compose. We are therefore resigned to trying to hack this together ourselves. Lame. Relevant fix which will make it into the next release: https://github.com/docker/compose/pull/1318
def pmbb(self,*args,**kwargs): """ NAME: pmbb PURPOSE: return proper motion in Galactic latitude (in mas/yr) INPUT: t - (optional) time at which to get pmbb (can be Quantity) obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_b(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU) """ out= self._orb.pmbb(*args,**kwargs) if len(out) == 1: return out[0] else: return out
NAME: pmbb PURPOSE: return proper motion in Galactic latitude (in mas/yr) INPUT: t - (optional) time at which to get pmbb (can be Quantity) obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.,0.,220.,0.]; entries can be Quantity) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) OUTPUT: pm_b(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU)
def _pull_and_tag_image(self, image, build_json, nonce): """Docker pull the image and tag it uniquely for use by this build""" image = image.copy() first_library_exc = None for _ in range(20): # retry until pull and tag is successful or definitively fails. # should never require 20 retries but there's a race condition at work. # just in case something goes wildly wrong, limit to 20 so it terminates. try: self.tasker.pull_image(image, insecure=self.parent_registry_insecure, dockercfg_path=self.parent_registry_dockercfg_path) self.workflow.pulled_base_images.add(image.to_str()) except RetryGeneratorException as exc: # getting here means the pull itself failed. we may want to retry if the # image being pulled lacks a namespace, like e.g. "rhel7". we cannot count # on the registry mapping this into the docker standard "library/rhel7" so # need to retry with that. if first_library_exc: # we already tried and failed; report the first failure. raise first_library_exc if image.namespace: # already namespaced, do not retry with "library/", just fail. raise self.log.info("'%s' not found", image.to_str()) image.namespace = 'library' self.log.info("trying '%s'", image.to_str()) first_library_exc = exc # report first failure if retry also fails continue # Attempt to tag it using a unique ID. We might have to retry # if another build with the same parent image is finishing up # and removing images it pulled. # Use the OpenShift build name as the unique ID unique_id = build_json['metadata']['name'] new_image = ImageName(repo=unique_id, tag=nonce) try: self.log.info("tagging pulled image") response = self.tasker.tag_image(image, new_image) self.workflow.pulled_base_images.add(response) self.log.debug("image '%s' is available as '%s'", image, new_image) return new_image except docker.errors.NotFound: # If we get here, some other build raced us to remove # the parent image, and that build won. # Retry the pull immediately. self.log.info("re-pulling removed image") continue # Failed to tag it after 20 tries self.log.error("giving up trying to pull image") raise RuntimeError("too many attempts to pull and tag image")
Docker pull the image and tag it uniquely for use by this build
def html2md(html_string): """ Convert a string or html file to a markdown table string. Parameters ---------- html_string : str Either the html string, or the filepath to the html Returns ------- str The html table converted to a Markdown table Notes ----- This function requires BeautifulSoup_ to work. Example ------- >>> html_text = ''' ... <table> ... <tr> ... <th> ... Header 1 ... </th> ... <th> ... Header 2 ... </th> ... <th> ... Header 3 ... </th> ... <tr> ... <td> ... <p>This is a paragraph</p> ... </td> ... <td> ... Just text ... </td> ... <td> ... Hot dog ... </td> ... </tr> ... </table> ... ''' >>> import dashtable >>> print(dashtable.html2md(html_text)) | Header 1 | Header 2 | Header 3 | |---------------------|-----------|----------| | This is a paragraph | Just text | Hot dog | .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/ """ if os.path.isfile(html_string): file = open(html_string, 'r', encoding='utf-8') lines = file.readlines() file.close() html_string = ''.join(lines) table_data, spans, use_headers = html2data(html_string) if table_data == '': return '' return data2md(table_data)
Convert a string or html file to a markdown table string. Parameters ---------- html_string : str Either the html string, or the filepath to the html Returns ------- str The html table converted to a Markdown table Notes ----- This function requires BeautifulSoup_ to work. Example ------- >>> html_text = ''' ... <table> ... <tr> ... <th> ... Header 1 ... </th> ... <th> ... Header 2 ... </th> ... <th> ... Header 3 ... </th> ... <tr> ... <td> ... <p>This is a paragraph</p> ... </td> ... <td> ... Just text ... </td> ... <td> ... Hot dog ... </td> ... </tr> ... </table> ... ''' >>> import dashtable >>> print(dashtable.html2md(html_text)) | Header 1 | Header 2 | Header 3 | |---------------------|-----------|----------| | This is a paragraph | Just text | Hot dog | .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
def sizeof(s): """ Return the size of an object when packed """ if hasattr(s, '_size_'): return s._size_ elif isinstance(s, bytes): return len(s) raise ValueError(s)
Return the size of an object when packed
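Both branches in one quick check; the Header type is illustrative:

class Header:
    _size_ = 8  # fixed packed size, as a struct-like type might declare

print(sizeof(Header()))      # 8 -- via the _size_ attribute
print(sizeof(b'\x00\x01'))   # 2 -- plain bytes fall back to len()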
def isDirect(self): """Test if the message is a direct message type.""" direct = (self._messageType == 0x00) if self.isDirectACK or self.isDirectNAK: direct = True return direct
Test if the message is a direct message type.