code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def before_sleep_func_accept_retry_state(fn):
    """Wrap "before_sleep" function to accept "retry_state".

    Returns *fn* unchanged when it is not callable or already accepts a
    ``retry_state`` argument; otherwise returns a shim adapting the new
    single-argument calling convention to the legacy
    ``(retry_object, sleep=..., last_result=...)`` signature.
    """
    if not six.callable(fn):
        return fn
    if func_takes_retry_state(fn):
        return fn

    @_utils.wraps(fn)
    def wrapped_before_sleep_func(retry_state):
        # Legacy positional/keyword layout: retry_object, sleep, last_result.
        warn_about_non_retry_state_deprecation(
            'before_sleep', fn, stacklevel=4)
        return fn(
            retry_state.retry_object,
            # Plain attribute access: getattr() without a default raises the
            # same AttributeError and only obscured the intent.
            sleep=retry_state.next_action.sleep,
            last_result=retry_state.outcome)
    return wrapped_before_sleep_func
Wrap "before_sleep" function to accept "retry_state".
Below is the the instruction that describes the task: ### Input: Wrap "before_sleep" function to accept "retry_state". ### Response: def before_sleep_func_accept_retry_state(fn): """Wrap "before_sleep" function to accept "retry_state".""" if not six.callable(fn): return fn if func_takes_retry_state(fn): return fn @_utils.wraps(fn) def wrapped_before_sleep_func(retry_state): # retry_object, sleep, last_result warn_about_non_retry_state_deprecation( 'before_sleep', fn, stacklevel=4) return fn( retry_state.retry_object, sleep=getattr(retry_state.next_action, 'sleep'), last_result=retry_state.outcome) return wrapped_before_sleep_func
def add(self, origin, rel, target, attrs=None, index=None):
    '''
    Add one relationship to the extent

    origin - origin of the relationship (similar to an RDF subject)
    rel - type IRI of the relationship (similar to an RDF predicate)
    target - target of the relationship (similar to an RDF object), a boolean,
             floating point or unicode object
    attrs - optional attribute mapping of relationship metadata, i.e.
            {attrname1: attrval1, attrname2: attrval2}
    index - optional position for the relationship to be inserted

    Returns the index at which the relationship was stored.
    Raises ValueError when origin or rel is empty/null.
    '''
    #FIXME: return an ID (IRI) for the resulting relationship?
    if not origin:
        raise ValueError('Relationship origin cannot be null')
    if not rel:
        raise ValueError('Relationship ID cannot be null')

    # Convert attrs to the expected attribute class. The original check
    # compared type(attrs) against type(self._attr_cls) — i.e. against the
    # *metaclass* — which re-wrapped attrs on every call even when it was
    # already the right type. isinstance() is the intended test.
    if not isinstance(attrs, self._attr_cls):
        attrs = self._attr_cls(attrs or {})

    #No, could be an I instance, fails assertion
    #assert isinstance(origin, str) and isinstance(origin, str) and isinstance(origin, str) and isinstance(origin, dict), (origin, rel, target, attrs)
    item = (origin, rel, target, attrs)
    if index is not None:
        rid = index
        self._relationships.insert(index, item)
    else:
        rid = self.size()
        self._relationships.append(item)
    return rid
Add one relationship to the extent origin - origin of the relationship (similar to an RDF subject) rel - type IRI of the relationship (similar to an RDF predicate) target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} index - optional position for the relationship to be inserted
Below is the the instruction that describes the task: ### Input: Add one relationship to the extent origin - origin of the relationship (similar to an RDF subject) rel - type IRI of the relationship (similar to an RDF predicate) target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} index - optional position for the relationship to be inserted ### Response: def add(self, origin, rel, target, attrs=None, index=None): ''' Add one relationship to the extent origin - origin of the relationship (similar to an RDF subject) rel - type IRI of the relationship (similar to an RDF predicate) target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2} index - optional position for the relationship to be inserted ''' #FIXME: return an ID (IRI) for the resulting relationship? if not origin: raise ValueError('Relationship origin cannot be null') if not rel: raise ValueError('Relationship ID cannot be null') # convert attribute class to the expected type if type(attrs) != type(self._attr_cls): attrs = self._attr_cls(attrs or {}) #No, could be an I instance, fails assertion #assert isinstance(origin, str) and isinstance(origin, str) and isinstance(origin, str) and isinstance(origin, dict), (origin, rel, target, attrs) item = (origin, rel, target, attrs) if index is not None: rid = index self._relationships.insert(index, item) else: rid = self.size() self._relationships.append(item) return rid
def authenticate(username, password):
    """Authenticate with a DC/OS cluster and return an ACS token.

    return: ACS token
    """
    # POST the credentials to the ACS login endpoint.
    response = dcos.http.request(
        'post',
        _gen_url('acs/api/v1/auth/login'),
        json={'uid': username, 'password': password})

    # Anything other than HTTP 200 means authentication failed.
    if response.status_code != 200:
        return None
    return response.json()['token']
Authenticate with a DC/OS cluster and return an ACS token. return: ACS token
Below is the the instruction that describes the task: ### Input: Authenticate with a DC/OS cluster and return an ACS token. return: ACS token ### Response: def authenticate(username, password): """Authenticate with a DC/OS cluster and return an ACS token. return: ACS token """ url = _gen_url('acs/api/v1/auth/login') creds = { 'uid': username, 'password': password } response = dcos.http.request('post', url, json=creds) if response.status_code == 200: return response.json()['token'] else: return None
def deduplicate(
        ctx, strategy, time_source, regexp, dry_run, message_id,
        size_threshold, content_threshold, show_diff, maildirs):
    """ Deduplicate mails from a set of maildir folders.

    Run a first pass computing the canonical hash of each encountered mail
    from their headers, then a second pass to apply the deletion strategy
    on each subset of duplicate mails.

    \b
    Removal strategies for each subsets of duplicate mails:
      - delete-older: Deletes the olders, keeps the newests.
      - delete-oldest: Deletes the oldests, keeps the newers.
      - delete-newer: Deletes the newers, keeps the oldests.
      - delete-newest: Deletes the newests, keeps the olders.
      - delete-smaller: Deletes the smallers, keeps the biggests.
      - delete-smallest: Deletes the smallests, keeps the biggers.
      - delete-bigger: Deletes the biggers, keeps the smallests.
      - delete-biggest: Deletes the biggests, keeps the smallers.
      - delete-matching-path: Deletes all duplicates whose file path match the
      regular expression provided via the --regexp parameter.
      - delete-non-matching-path: Deletes all duplicates whose file path
      doesn't match the regular expression provided via the --regexp
      parameter.

    Deletion strategy on a duplicate set only applies if no major differences
    between mails are uncovered during a fine-grained check differences during
    the second pass. Limits can be set via the threshold options.
    """
    # Print help screen and exit if no maildir folder provided.
    if not maildirs:
        click.echo(ctx.get_help())
        ctx.exit()

    # Validate exclusive options requirement depending on strategy.
    # Each entry: (option value, user-facing flag name, strategies that
    # *require* that option). A required option must be present for its
    # strategies and must be absent for every other strategy.
    requirements = [
        (time_source, '-t/--time-source', [
            DELETE_OLDER, DELETE_OLDEST, DELETE_NEWER, DELETE_NEWEST]),
        (regexp, '-r/--regexp', [
            DELETE_MATCHING_PATH, DELETE_NON_MATCHING_PATH])]
    for param_value, param_name, required_strategies in requirements:
        if strategy in required_strategies:
            if not param_value:
                raise click.BadParameter(
                    '{} strategy requires the {} parameter.'.format(
                        strategy, param_name))
        elif param_value:
            raise click.BadParameter(
                '{} parameter not allowed in {} strategy.'.format(
                    param_name, strategy))

    # Bundle all validated CLI options into the run configuration.
    conf = Config(
        strategy=strategy,
        time_source=time_source,
        regexp=regexp,
        dry_run=dry_run,
        show_diff=show_diff,
        message_id=message_id,
        size_threshold=size_threshold,
        content_threshold=content_threshold,
        # progress=progress,
    )

    dedup = Deduplicate(conf)

    # Phase 1: scan every maildir and hash each mail's canonical headers.
    logger.info('=== Start phase #1: load mails and compute hashes.')
    for maildir in maildirs:
        dedup.add_maildir(maildir)

    # Phase 2: apply the deletion strategy to each duplicate set, then report.
    logger.info('=== Start phase #2: deduplicate mails.')
    dedup.run()
    dedup.report()
Deduplicate mails from a set of maildir folders. Run a first pass computing the canonical hash of each encountered mail from their headers, then a second pass to apply the deletion strategy on each subset of duplicate mails. \b Removal strategies for each subsets of duplicate mails: - delete-older: Deletes the olders, keeps the newests. - delete-oldest: Deletes the oldests, keeps the newers. - delete-newer: Deletes the newers, keeps the oldests. - delete-newest: Deletes the newests, keeps the olders. - delete-smaller: Deletes the smallers, keeps the biggests. - delete-smallest: Deletes the smallests, keeps the biggers. - delete-bigger: Deletes the biggers, keeps the smallests. - delete-biggest: Deletes the biggests, keeps the smallers. - delete-matching-path: Deletes all duplicates whose file path match the regular expression provided via the --regexp parameter. - delete-non-matching-path: Deletes all duplicates whose file path doesn't match the regular expression provided via the --regexp parameter. Deletion strategy on a duplicate set only applies if no major differences between mails are uncovered during a fine-grained check differences during the second pass. Limits can be set via the threshold options.
Below is the the instruction that describes the task: ### Input: Deduplicate mails from a set of maildir folders. Run a first pass computing the canonical hash of each encountered mail from their headers, then a second pass to apply the deletion strategy on each subset of duplicate mails. \b Removal strategies for each subsets of duplicate mails: - delete-older: Deletes the olders, keeps the newests. - delete-oldest: Deletes the oldests, keeps the newers. - delete-newer: Deletes the newers, keeps the oldests. - delete-newest: Deletes the newests, keeps the olders. - delete-smaller: Deletes the smallers, keeps the biggests. - delete-smallest: Deletes the smallests, keeps the biggers. - delete-bigger: Deletes the biggers, keeps the smallests. - delete-biggest: Deletes the biggests, keeps the smallers. - delete-matching-path: Deletes all duplicates whose file path match the regular expression provided via the --regexp parameter. - delete-non-matching-path: Deletes all duplicates whose file path doesn't match the regular expression provided via the --regexp parameter. Deletion strategy on a duplicate set only applies if no major differences between mails are uncovered during a fine-grained check differences during the second pass. Limits can be set via the threshold options. ### Response: def deduplicate( ctx, strategy, time_source, regexp, dry_run, message_id, size_threshold, content_threshold, show_diff, maildirs): """ Deduplicate mails from a set of maildir folders. Run a first pass computing the canonical hash of each encountered mail from their headers, then a second pass to apply the deletion strategy on each subset of duplicate mails. \b Removal strategies for each subsets of duplicate mails: - delete-older: Deletes the olders, keeps the newests. - delete-oldest: Deletes the oldests, keeps the newers. - delete-newer: Deletes the newers, keeps the oldests. - delete-newest: Deletes the newests, keeps the olders. 
- delete-smaller: Deletes the smallers, keeps the biggests. - delete-smallest: Deletes the smallests, keeps the biggers. - delete-bigger: Deletes the biggers, keeps the smallests. - delete-biggest: Deletes the biggests, keeps the smallers. - delete-matching-path: Deletes all duplicates whose file path match the regular expression provided via the --regexp parameter. - delete-non-matching-path: Deletes all duplicates whose file path doesn't match the regular expression provided via the --regexp parameter. Deletion strategy on a duplicate set only applies if no major differences between mails are uncovered during a fine-grained check differences during the second pass. Limits can be set via the threshold options. """ # Print help screen and exit if no maildir folder provided. if not maildirs: click.echo(ctx.get_help()) ctx.exit() # Validate exclusive options requirement depending on strategy. requirements = [ (time_source, '-t/--time-source', [ DELETE_OLDER, DELETE_OLDEST, DELETE_NEWER, DELETE_NEWEST]), (regexp, '-r/--regexp', [ DELETE_MATCHING_PATH, DELETE_NON_MATCHING_PATH])] for param_value, param_name, required_strategies in requirements: if strategy in required_strategies: if not param_value: raise click.BadParameter( '{} strategy requires the {} parameter.'.format( strategy, param_name)) elif param_value: raise click.BadParameter( '{} parameter not allowed in {} strategy.'.format( param_name, strategy)) conf = Config( strategy=strategy, time_source=time_source, regexp=regexp, dry_run=dry_run, show_diff=show_diff, message_id=message_id, size_threshold=size_threshold, content_threshold=content_threshold, # progress=progress, ) dedup = Deduplicate(conf) logger.info('=== Start phase #1: load mails and compute hashes.') for maildir in maildirs: dedup.add_maildir(maildir) logger.info('=== Start phase #2: deduplicate mails.') dedup.run() dedup.report()
def _get_gcloud_sdk_credentials():
    """Gets the credentials and project ID from the Cloud SDK."""
    from google.auth import _cloud_sdk

    # Bail out early when application default credentials were never created.
    credentials_filename = (
        _cloud_sdk.get_application_default_credentials_path())
    if not os.path.isfile(credentials_filename):
        return None, None

    credentials, project_id = _load_credentials_from_file(
        credentials_filename)

    # Fall back to the SDK's configured project when the file lacks one.
    return credentials, project_id or _cloud_sdk.get_project_id()
Gets the credentials and project ID from the Cloud SDK.
Below is the the instruction that describes the task: ### Input: Gets the credentials and project ID from the Cloud SDK. ### Response: def _get_gcloud_sdk_credentials(): """Gets the credentials and project ID from the Cloud SDK.""" from google.auth import _cloud_sdk # Check if application default credentials exist. credentials_filename = ( _cloud_sdk.get_application_default_credentials_path()) if not os.path.isfile(credentials_filename): return None, None credentials, project_id = _load_credentials_from_file( credentials_filename) if not project_id: project_id = _cloud_sdk.get_project_id() return credentials, project_id
def to_dict(self):
    """Return a dictionary of the object's public properties.

    Iterates the instance ``__dict__`` (whose keys are assumed to carry a
    one-character private prefix such as ``_name``), strips that prefix to
    obtain the public property name, and includes the property only when
    the instance actually exposes an attribute under that name.
    """
    # Dict comprehension replaces the manual loop + update(); the original
    # iterated items() but never used the value, so iterate keys directly.
    # This also drops the py2-only iteritems() helper.
    return {
        key[1:]: getattr(self, key[1:], None)
        for key in self.__dict__
        if hasattr(self, key[1:])
    }
Return dictionary of object.
Below is the the instruction that describes the task: ### Input: Return dictionary of object. ### Response: def to_dict(self): """Return dictionary of object.""" dictionary = {} for key, value in iteritems(self.__dict__): property_name = key[1:] if hasattr(self, property_name): dictionary.update({property_name: getattr(self, property_name, None)}) return dictionary
def handleHTML(self, record):
    """ Saves the given record's page content to a .html file

    Attributes
       record (Record) -- The log record
    """
    # Create a unique file name to identify where this HTML source is coming from
    fileName = datetime.today().strftime("Neolib %Y-%m-%d %H-%M-%S ") + record.module + ".html"

    # Sometimes module may encase the text with < > which is an invalid character for a file name
    fileName = fileName.replace("<", "").replace(">", "")

    # Grab the pg
    pg = record.args['pg']

    # Format a log message that details the page
    ret = ("Message: " + record.msg
           + "\nLine Number: " + str(record.lineno)
           + "\nURL: " + str(pg.url)
           + "\nPost Data: " + str(pg.postData)
           + "\nAdditional Vars: " + str(pg.vars))
    ret += "\n\n\n" + str(pg.header) + "\n\n" + pg.content

    # 'with' guarantees the handle is closed even if write() raises;
    # the original open()/close() pair leaked the descriptor on error.
    with open(fileName, "w", encoding='utf-8') as f:
        f.write(ret)
Saves the given record's page content to a .html file Attributes record (Record) -- The log record
Below is the the instruction that describes the task: ### Input: Saves the given record's page content to a .html file Attributes record (Record) -- The log record ### Response: def handleHTML(self, record): """ Saves the given record's page content to a .html file Attributes record (Record) -- The log record """ # Create a unique file name to identify where this HTML source is coming from fileName = datetime.today().strftime("Neolib %Y-%m-%d %H-%M-%S ") + record.module + ".html" # Sometimes module may encase the text with < > which is an invalid character for a file name fileName = fileName.replace("<", "").replace(">", "") # Grab the pg pg = record.args['pg'] # Format a log message that details the page ret = "Message: " + record.msg + "\nLine Number: " + str(record.lineno) + "\nURL: " + str(pg.url) + "\nPost Data: " + str(pg.postData) + "\nAdditional Vars: " + str(pg.vars) ret += "\n\n\n" + str(pg.header) + "\n\n" + pg.content # Write the file f = open(fileName, "w", encoding='utf-8') f.write(ret) f.close()
def ncposr(string, chars, start):
    """
    Find the first occurrence in a string of a character NOT belonging to a
    collection of characters, starting at a specified location, searching
    in reverse.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncposr_c.html

    :param string: Any character string.
    :type string: str
    :param chars: A collection of characters.
    :type chars: str
    :param start: Position to begin looking for one of chars.
    :type start: int
    :return: index
    :rtype: int
    """
    # Marshal the Python arguments into the C types CSPICE expects and
    # delegate straight to the native routine.
    return libspice.ncposr_c(
        stypes.stringToCharP(string),
        stypes.stringToCharP(chars),
        ctypes.c_int(start))
Find the first occurrence in a string of a character NOT belonging to a collection of characters, starting at a specified location, searching in reverse. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncposr_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: index :rtype: int
Below is the the instruction that describes the task: ### Input: Find the first occurrence in a string of a character NOT belonging to a collection of characters, starting at a specified location, searching in reverse. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncposr_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: index :rtype: int ### Response: def ncposr(string, chars, start): """ Find the first occurrence in a string of a character NOT belonging to a collection of characters, starting at a specified location, searching in reverse. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncposr_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one of chars. :type start: int :return: index :rtype: int """ string = stypes.stringToCharP(string) chars = stypes.stringToCharP(chars) start = ctypes.c_int(start) return libspice.ncposr_c(string, chars, start)
def WriteManyToPath(objs, filepath):
    """Serializes and writes given Python objects to a multi-document YAML file.

    Args:
      objs: An iterable of Python objects to serialize.
      filepath: A path to the file into which the object is to be written.
    """
    # Open in text mode with an explicit UTF-8 encoding and delegate the
    # actual serialization to the file-object variant.
    with io.open(filepath, mode="w", encoding="utf-8") as out:
        WriteManyToFile(objs, out)
Serializes and writes given Python objects to a multi-document YAML file. Args: objs: An iterable of Python objects to serialize. filepath: A path to the file into which the object is to be written.
Below is the the instruction that describes the task: ### Input: Serializes and writes given Python objects to a multi-document YAML file. Args: objs: An iterable of Python objects to serialize. filepath: A path to the file into which the object is to be written. ### Response: def WriteManyToPath(objs, filepath): """Serializes and writes given Python objects to a multi-document YAML file. Args: objs: An iterable of Python objects to serialize. filepath: A path to the file into which the object is to be written. """ with io.open(filepath, mode="w", encoding="utf-8") as filedesc: WriteManyToFile(objs, filedesc)
def extend(self, other):
    """
    Append the segmentlists from *other* to the matching segmentlists in
    *self*, creating (shallow-copied) segmentlists for keys not yet present.
    """
    for key, value in other.iteritems():
        if key in self:
            # Key already tracked: append the incoming segments in place.
            self[key].extend(value)
        else:
            # New key: shallow-copy so later growth of *other* is isolated.
            self[key] = _shallowcopy(value)
Appends the segmentlists from other to the corresponding segmentlists in self, adding new segmentslists to self as needed.
Below is the the instruction that describes the task: ### Input: Appends the segmentlists from other to the corresponding segmentlists in self, adding new segmentslists to self as needed. ### Response: def extend(self, other): """ Appends the segmentlists from other to the corresponding segmentlists in self, adding new segmentslists to self as needed. """ for key, value in other.iteritems(): if key not in self: self[key] = _shallowcopy(value) else: self[key].extend(value)
def execute_once(self, string):
    """Execute only one rule.

    Applies the first rule whose pattern occurs in *string*, substituting
    only the first occurrence, and records the rule in ``self.last_rule``
    (``None`` when no rule matched). Returns the resulting string.
    """
    for rule in self.rules:
        pattern, replacement = rule[0], rule[1]
        if pattern in string:
            self.last_rule = rule
            # str.replace with count=1 does the find-and-splice in a single
            # C-level pass; the original scanned the string twice ('in'
            # followed by find()) and rebuilt it with manual slicing.
            return string.replace(pattern, replacement, 1)
    self.last_rule = None
    return string
Execute only one rule.
Below is the instruction that describes the task: ### Input: Execute only one rule. ### Response: def execute_once(self, string): """Execute only one rule.""" for rule in self.rules: if rule[0] in string: pos = string.find(rule[0]) self.last_rule = rule return string[:pos] + rule[1] + string[pos+len(rule[0]):] self.last_rule = None return string
def tags(cls, filename, namespace=None):
    """Extract tags from file.

    Opens *filename* with the class raster opener and returns its tags,
    optionally restricted to *namespace*.
    """
    raster = cls._raster_opener(filename)
    return raster.tags(ns=namespace)
Extract tags from file.
Below is the instruction that describes the task: ### Input: Extract tags from file. ### Response: def tags(cls, filename, namespace=None): """Extract tags from file.""" return cls._raster_opener(filename).tags(ns=namespace)
def get_asset_contents_by_ids(self, asset_content_ids):
    """Gets an ``AssetList`` corresponding to the given ``IdList``.

    In plenary mode, the returned list contains all of the asset contents
    specified in the ``Id`` list, in the order of the list, including
    duplicates, or an error results if an ``Id`` in the supplied list is
    not found or inaccessible. Otherwise, inaccessible ``AssetContnts``
    may be omitted from the list and may present the elements in any
    order including returning a unique set.

    :param asset_content_ids: the list of ``Ids`` to retrieve
    :type asset_content_ids: ``osid.id.IdList``
    :return: the returned ``AssetContent list``
    :rtype: ``osid.repository.AssetContentList``
    :raise: ``NotFound`` -- an ``Id`` was not found
    :raise: ``NullArgument`` -- ``asset_ids`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Asset contents are embedded inside Asset documents, so the query runs
    # against the 'Asset' collection, not a dedicated one.
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    # Convert each incoming osid Id into a raw Mongo ObjectId.
    object_id_list = [ObjectId(self._get_id(i, 'repository').get_identifier())
                      for i in asset_content_ids]
    # $in matches any Asset holding at least one of the requested contents;
    # the view filter narrows results per the session's current view.
    results = collection.find(
        dict({'assetContents._id': {'$in': object_id_list}},
             **self._view_filter()))
    # if a match is not found, NotFound exception will be thrown by find_one, so
    # the below should always work
    # Flatten: pick out only the requested contents from each matched Asset.
    # NOTE(review): ordering follows database result order, not the order of
    # asset_content_ids — confirm plenary-mode ordering expectations upstream.
    asset_content_maps = [ac for asset in results
                          for ac in asset['assetContents']
                          for object_id in object_id_list
                          if ac['_id'] == object_id]
    return objects.AssetContentList(asset_content_maps,
                                    runtime=self._runtime,
                                    proxy=self._proxy)
Gets an ``AssetList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the asset contents specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``AssetContnts`` may be omitted from the list and may present the elements in any order including returning a unique set. :param asset_content_ids: the list of ``Ids`` to retrieve :type asset_content_ids: ``osid.id.IdList`` :return: the returned ``AssetContent list`` :rtype: ``osid.repository.AssetContentList`` :raise: ``NotFound`` -- an ``Id`` was not found :raise: ``NullArgument`` -- ``asset_ids`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets an ``AssetList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the asset contents specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``AssetContnts`` may be omitted from the list and may present the elements in any order including returning a unique set. :param asset_content_ids: the list of ``Ids`` to retrieve :type asset_content_ids: ``osid.id.IdList`` :return: the returned ``AssetContent list`` :rtype: ``osid.repository.AssetContentList`` :raise: ``NotFound`` -- an ``Id`` was not found :raise: ``NullArgument`` -- ``asset_ids`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_asset_contents_by_ids(self, asset_content_ids): """Gets an ``AssetList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the asset contents specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``AssetContnts`` may be omitted from the list and may present the elements in any order including returning a unique set. 
:param asset_content_ids: the list of ``Ids`` to retrieve :type asset_content_ids: ``osid.id.IdList`` :return: the returned ``AssetContent list`` :rtype: ``osid.repository.AssetContentList`` :raise: ``NotFound`` -- an ``Id`` was not found :raise: ``NullArgument`` -- ``asset_ids`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) object_id_list = [ObjectId(self._get_id(i, 'repository').get_identifier()) for i in asset_content_ids] results = collection.find( dict({'assetContents._id': {'$in': object_id_list}}, **self._view_filter())) # if a match is not found, NotFound exception will be thrown by find_one, so # the below should always work asset_content_maps = [ac for asset in results for ac in asset['assetContents'] for object_id in object_id_list if ac['_id'] == object_id] return objects.AssetContentList(asset_content_maps, runtime=self._runtime, proxy=self._proxy)
def check_tracer_for_mass_profile(func):
    """If none of the tracer's galaxies have a mass profile, it surface density, potential and deflections cannot \
    be computed. This wrapper makes these properties return *None*.

    Parameters
    ----------
    func : (self) -> Object
        A property function that requires galaxies to have a mass profile.
    """

    @wraps(func)
    def wrapper(self):
        """Return func(self) when the tracer has a mass profile, else None.

        Parameters
        ----------
        self

        Returns
        -------
        A value or coordinate in the same coordinate system as those passed in.
        """
        # Plain truthiness instead of 'is True': identity comparison silently
        # takes the None branch if has_mass_profile ever yields a truthy
        # non-bool (e.g. numpy.bool_).
        if self.has_mass_profile:
            return func(self)
        return None

    return wrapper
If none of the tracer's galaxies have a mass profile, it surface density, potential and deflections cannot \ be computed. This wrapper makes these properties return *None*. Parameters ---------- func : (self) -> Object A property function that requires galaxies to have a mass profile.
Below is the the instruction that describes the task: ### Input: If none of the tracer's galaxies have a mass profile, it surface density, potential and deflections cannot \ be computed. This wrapper makes these properties return *None*. Parameters ---------- func : (self) -> Object A property function that requires galaxies to have a mass profile. ### Response: def check_tracer_for_mass_profile(func): """If none of the tracer's galaxies have a mass profile, it surface density, potential and deflections cannot \ be computed. This wrapper makes these properties return *None*. Parameters ---------- func : (self) -> Object A property function that requires galaxies to have a mass profile. """ @wraps(func) def wrapper(self): """ Parameters ---------- self Returns ------- A value or coordinate in the same coordinate system as those passed in. """ if self.has_mass_profile is True: return func(self) else: return None return wrapper
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    def decorating_function(user_function, **kwargs):
        # These kwargs let callers override the builtins the wrapper uses
        # (e.g. for testing); each defaults to the genuine builtin.
        tuple = kwargs.get('tuple', builtins.tuple)
        sorted = kwargs.get('sorted', builtins.sorted)
        map = kwargs.get('map', builtins.map)
        len = kwargs.get('len', builtins.len)
        type = kwargs.get('type', builtins.type)
        KeyError = kwargs.get('KeyError', builtins.KeyError)

        # Single-element lists emulate ``nonlocal`` so the closures below can
        # mutate the hit/miss counters (Python 2 compatible backport style).
        hits = [0]
        misses = [0]
        kwd_mark = (object(),)  # separates positional and keyword args
        lock = Lock()  # needed because OrderedDict isn't threadsafe

        if maxsize is None:
            cache = dict()  # simple cache without ordering or size limit

            @wraps(user_function)
            def wrapper(*args, **kwds):
                # nonlocal hits, misses
                key = args
                if kwds:
                    sorted_items = tuple(sorted(kwds.items()))
                    key += kwd_mark + sorted_items
                if typed:
                    # Append argument types so f(3) and f(3.0) key separately.
                    key += tuple(map(type, args))
                    if kwds:
                        key += tuple(type(v) for k, v in sorted_items)
                try:
                    result = cache[key]
                    hits[0] += 1
                    return result
                except KeyError:
                    pass
                result = user_function(*args, **kwds)
                cache[key] = result
                misses[0] += 1
                return result
        else:
            cache = OrderedDict()  # ordered least recent to most recent
            cache_popitem = cache.popitem
            # use the move_to_end method if available, otherwise fallback to
            # the function.
            cache_renew = getattr(
                cache,
                'move_to_end',
                functools.partial(_move_to_end, cache))

            @wraps(user_function)
            def wrapper(*args, **kwds):
                # nonlocal hits, misses
                key = args
                if kwds:
                    sorted_items = tuple(sorted(kwds.items()))
                    key += kwd_mark + sorted_items
                if typed:
                    key += tuple(map(type, args))
                    if kwds:
                        key += tuple(type(v) for k, v in sorted_items)
                with lock:
                    try:
                        result = cache[key]
                        cache_renew(key)  # record recent use of this key
                        hits[0] += 1
                        return result
                    except KeyError:
                        pass
                # The user function is deliberately called OUTSIDE the lock so
                # slow functions do not serialize all cached calls.
                result = user_function(*args, **kwds)
                with lock:
                    cache[key] = result  # record recent use of this key
                    misses[0] += 1
                    if len(cache) > maxsize:
                        cache_popitem(0)  # purge least recently used cache entry
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(hits[0], misses[0], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            # nonlocal hits, misses
            with lock:
                cache.clear()
                hits[0] = misses[0] = 0

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return wrapper

    return decorating_function
Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
Below is the the instruction that describes the task: ### Input: Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used ### Response: def lru_cache(maxsize=100, typed=False): """Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). 
def decorating_function(user_function, **kwargs): tuple = kwargs.get('tuple', builtins.tuple) sorted = kwargs.get('sorted', builtins.sorted) map = kwargs.get('map', builtins.map) len = kwargs.get('len', builtins.len) type = kwargs.get('type', builtins.type) KeyError = kwargs.get('KeyError', builtins.KeyError) hits = [0] misses = [0] kwd_mark = (object(),) # separates positional and keyword args lock = Lock() # needed because OrderedDict isn't threadsafe if maxsize is None: cache = dict() # simple cache without ordering or size limit @wraps(user_function) def wrapper(*args, **kwds): # nonlocal hits, misses key = args if kwds: sorted_items = tuple(sorted(kwds.items())) key += kwd_mark + sorted_items if typed: key += tuple(map(type, args)) if kwds: key += tuple(type(v) for k, v in sorted_items) try: result = cache[key] hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) cache[key] = result misses[0] += 1 return result else: cache = OrderedDict() # ordered least recent to most recent cache_popitem = cache.popitem # use the move_to_end method if available, otherwise fallback to # the function. 
cache_renew = getattr( cache, 'move_to_end', functools.partial(_move_to_end, cache)) @wraps(user_function) def wrapper(*args, **kwds): # nonlocal hits, misses key = args if kwds: sorted_items = tuple(sorted(kwds.items())) key += kwd_mark + sorted_items if typed: key += tuple(map(type, args)) if kwds: key += tuple(type(v) for k, v in sorted_items) with lock: try: result = cache[key] cache_renew(key) # record recent use of this key hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) with lock: cache[key] = result # record recent use of this key misses[0] += 1 if len(cache) > maxsize: cache_popitem(0) # purge least recently used cache entry return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(hits[0], misses[0], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" # nonlocal hits, misses with lock: cache.clear() hits[0] = misses[0] = 0 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return wrapper return decorating_function
def _f_gene(sid, prefix="G_"):
    """Clip the gene *prefix* from *sid* after restoring ``.`` for SBML_DOT."""
    return _clip(sid.replace(SBML_DOT, "."), prefix)
Clips gene prefix from id.
Below is the instruction that describes the task: ### Input: Clips gene prefix from id. ### Response: def _f_gene(sid, prefix="G_"): """Clips gene prefix from id.""" sid = sid.replace(SBML_DOT, ".") return _clip(sid, prefix)
def member_del(self, member_id, reconfig=True):
    """Remove a member from the replica set.

    Args:
        member_id - member index
        reconfig - when True, and the member is still part of the current
            configuration, push an updated replica-set config before
            removing the server

    Returns True once the underlying server has been removed.
    """
    target_host = self.member_id_to_host(member_id)
    target_server = self._servers.host_to_server_id(target_host)
    # Only touch the replica-set config when asked to AND the member is
    # actually present (``self.members()`` is only consulted in that case).
    if reconfig and member_id in [m['_id'] for m in self.members()]:
        new_config = self.config
        new_config['members'].pop(member_id)
        self.repl_update(new_config)
    self._servers.remove(target_server)
    return True
remove member from replica set Args: member_id - member index reconfig - is need reconfig replica return True if operation success otherwise False
Below is the the instruction that describes the task: ### Input: remove member from replica set Args: member_id - member index reconfig - is need reconfig replica return True if operation success otherwise False ### Response: def member_del(self, member_id, reconfig=True): """remove member from replica set Args: member_id - member index reconfig - is need reconfig replica return True if operation success otherwise False """ server_id = self._servers.host_to_server_id( self.member_id_to_host(member_id)) if reconfig and member_id in [member['_id'] for member in self.members()]: config = self.config config['members'].pop(member_id) self.repl_update(config) self._servers.remove(server_id) return True
def _get_url():
    '''
    Assemble the kapacitor service URL from minion configuration options.
    '''
    get_opt = __salt__['config.option']
    protocol = get_opt('kapacitor.protocol', 'http')
    host = get_opt('kapacitor.host', 'localhost')
    port = get_opt('kapacitor.port', 9092)
    return '{0}://{1}:{2}'.format(protocol, host, port)
Get the kapacitor URL.
Below is the instruction that describes the task: ### Input: Get the kapacitor URL. ### Response: def _get_url(): ''' Get the kapacitor URL. ''' protocol = __salt__['config.option']('kapacitor.protocol', 'http') host = __salt__['config.option']('kapacitor.host', 'localhost') port = __salt__['config.option']('kapacitor.port', 9092) return '{0}://{1}:{2}'.format(protocol, host, port)
def _parse_message(self, message):
    """Parse a message received from an AMQP service.

    Populates the body wrapper plus the optional properties, header,
    footer, application-properties, annotations and delivery-annotations
    attributes from the received C message.

    :param message: The received C message.
    :type message: uamqp.c_uamqp.cMessage
    :raises TypeError: If the message carries a Sequence body, which is
        not supported.
    """
    _logger.debug("Parsing received message %r.", self.delivery_no)
    self._message = message
    # Wrap the raw body according to the declared AMQP body type.
    body_type = message.body_type
    if body_type == c_uamqp.MessageBodyType.NoneType:
        self._body = None
    elif body_type == c_uamqp.MessageBodyType.DataType:
        self._body = DataBody(self._message)
    elif body_type == c_uamqp.MessageBodyType.SequenceType:
        raise TypeError("Message body type Sequence not supported.")
    else:
        # Any other body type is treated as a single AMQP value.
        self._body = ValueBody(self._message)
    # Each section below is optional; the corresponding attribute is only
    # assigned when the section is present on the message.
    _props = self._message.properties
    if _props:
        _logger.debug("Parsing received message properties %r.", self.delivery_no)
        self.properties = MessageProperties(properties=_props, encoding=self._encoding)
    _header = self._message.header
    if _header:
        _logger.debug("Parsing received message header %r.", self.delivery_no)
        self.header = MessageHeader(header=_header)
    _footer = self._message.footer
    if _footer:
        _logger.debug("Parsing received message footer %r.", self.delivery_no)
        self.footer = _footer.map
    _app_props = self._message.application_properties
    if _app_props:
        _logger.debug("Parsing received message application properties %r.", self.delivery_no)
        self.application_properties = _app_props.map
    _ann = self._message.message_annotations
    if _ann:
        _logger.debug("Parsing received message annotations %r.", self.delivery_no)
        self.annotations = _ann.map
    _delivery_ann = self._message.delivery_annotations
    if _delivery_ann:
        _logger.debug("Parsing received message delivery annotations %r.", self.delivery_no)
        self.delivery_annotations = _delivery_ann.map
Parse a message received from an AMQP service. :param message: The received C message. :type message: uamqp.c_uamqp.cMessage
Below is the the instruction that describes the task: ### Input: Parse a message received from an AMQP service. :param message: The received C message. :type message: uamqp.c_uamqp.cMessage ### Response: def _parse_message(self, message): """Parse a message received from an AMQP service. :param message: The received C message. :type message: uamqp.c_uamqp.cMessage """ _logger.debug("Parsing received message %r.", self.delivery_no) self._message = message body_type = message.body_type if body_type == c_uamqp.MessageBodyType.NoneType: self._body = None elif body_type == c_uamqp.MessageBodyType.DataType: self._body = DataBody(self._message) elif body_type == c_uamqp.MessageBodyType.SequenceType: raise TypeError("Message body type Sequence not supported.") else: self._body = ValueBody(self._message) _props = self._message.properties if _props: _logger.debug("Parsing received message properties %r.", self.delivery_no) self.properties = MessageProperties(properties=_props, encoding=self._encoding) _header = self._message.header if _header: _logger.debug("Parsing received message header %r.", self.delivery_no) self.header = MessageHeader(header=_header) _footer = self._message.footer if _footer: _logger.debug("Parsing received message footer %r.", self.delivery_no) self.footer = _footer.map _app_props = self._message.application_properties if _app_props: _logger.debug("Parsing received message application properties %r.", self.delivery_no) self.application_properties = _app_props.map _ann = self._message.message_annotations if _ann: _logger.debug("Parsing received message annotations %r.", self.delivery_no) self.annotations = _ann.map _delivery_ann = self._message.delivery_annotations if _delivery_ann: _logger.debug("Parsing received message delivery annotations %r.", self.delivery_no) self.delivery_annotations = _delivery_ann.map
def _dispatch(self, func, args=None):
    """Forward a function call to the parent process over stdout.

    Arguments:
        func (str): Name of function for parent to call
        args (list, optional): Arguments passed to function when called
    """
    payload = {"name": func, "args": args or list()}
    request = {"header": "pyblish-qml:popen.request", "payload": payload}
    data = json.dumps(request)

    # Every request is answered immediately, always; a non-empty response
    # channel means an earlier exchange went wrong.
    assert self.channels["response"].empty(), (
        "There were pending messages in the response channel")

    sys.stdout.write(data + "\n")
    sys.stdout.flush()

    try:
        message = self.channels["response"].get()
        if six.PY3:
            response = json.loads(message)
        else:
            response = _byteify(json.loads(message, object_hook=_byteify))
    except TypeError as e:
        raise e
    else:
        assert response["header"] == "pyblish-qml:popen.response", response
        return response["payload"]
Send message to parent process Arguments: func (str): Name of function for parent to call args (list, optional): Arguments passed to function when called
Below is the the instruction that describes the task: ### Input: Send message to parent process Arguments: func (str): Name of function for parent to call args (list, optional): Arguments passed to function when called ### Response: def _dispatch(self, func, args=None): """Send message to parent process Arguments: func (str): Name of function for parent to call args (list, optional): Arguments passed to function when called """ data = json.dumps( { "header": "pyblish-qml:popen.request", "payload": { "name": func, "args": args or list(), } } ) # This should never happen. Each request is immediately # responded to, always. If it isn't the next line will block. # If multiple responses were made, then this will fail. # Both scenarios are bugs. assert self.channels["response"].empty(), ( "There were pending messages in the response channel") sys.stdout.write(data + "\n") sys.stdout.flush() try: message = self.channels["response"].get() if six.PY3: response = json.loads(message) else: response = _byteify(json.loads(message, object_hook=_byteify)) except TypeError as e: raise e else: assert response["header"] == "pyblish-qml:popen.response", response return response["payload"]
def scheduleMeasurement(self, measurementId, duration, start):
    """
    Schedules the requested measurement session with all INITIALISED devices.
    :param measurementId:
    :param duration:
    :param start:
    :return: a dict of device vs status.
    """
    # TODO subtract 1s from start and format
    statuses = {}
    for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name):
        url = device.payload['serviceURL']
        logger.info('Sending measurement ' + measurementId + ' to ' + url)
        try:
            resp = self.httpclient.put(
                url + '/measurements/' + measurementId,
                json={'duration': duration, 'at': start.strftime(DATETIME_FORMAT)})
            logger.info('Response for ' + measurementId + ' from ' + url +
                        ' is ' + str(resp.status_code))
            statuses[device] = resp.status_code
        except Exception as e:
            # A device that cannot be reached is recorded as a server error.
            logger.exception(e)
            statuses[device] = 500
    return statuses
Schedules the requested measurement session with all INITIALISED devices. :param measurementId: :param duration: :param start: :return: a dict of device vs status.
Below is the the instruction that describes the task: ### Input: Schedules the requested measurement session with all INITIALISED devices. :param measurementId: :param duration: :param start: :return: a dict of device vs status. ### Response: def scheduleMeasurement(self, measurementId, duration, start): """ Schedules the requested measurement session with all INITIALISED devices. :param measurementId: :param duration: :param start: :return: a dict of device vs status. """ # TODO subtract 1s from start and format results = {} for device in self.getDevices(RecordingDeviceStatus.INITIALISED.name): logger.info('Sending measurement ' + measurementId + ' to ' + device.payload['serviceURL']) try: resp = self.httpclient.put(device.payload['serviceURL'] + '/measurements/' + measurementId, json={'duration': duration, 'at': start.strftime(DATETIME_FORMAT)}) logger.info('Response for ' + measurementId + ' from ' + device.payload['serviceURL'] + ' is ' + str(resp.status_code)) results[device] = resp.status_code except Exception as e: logger.exception(e) results[device] = 500 return results
def _compute_k(self, tau): r"""Evaluate the kernel directly at the given values of `tau`. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. Returns ------- k : :py:class:`Array`, (`M`,) :math:`k(\tau)` (less the :math:`\sigma^2` prefactor). """ y, r2l2 = self._compute_y(tau, return_r2l2=True) k = 2.0**(1.0 - self.nu) / scipy.special.gamma(self.nu) * y**(self.nu / 2.0) * scipy.special.kv(self.nu, scipy.sqrt(y)) k[r2l2 == 0] = 1.0 return k
r"""Evaluate the kernel directly at the given values of `tau`. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. Returns ------- k : :py:class:`Array`, (`M`,) :math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
Below is the the instruction that describes the task: ### Input: r"""Evaluate the kernel directly at the given values of `tau`. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. Returns ------- k : :py:class:`Array`, (`M`,) :math:`k(\tau)` (less the :math:`\sigma^2` prefactor). ### Response: def _compute_k(self, tau): r"""Evaluate the kernel directly at the given values of `tau`. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. Returns ------- k : :py:class:`Array`, (`M`,) :math:`k(\tau)` (less the :math:`\sigma^2` prefactor). """ y, r2l2 = self._compute_y(tau, return_r2l2=True) k = 2.0**(1.0 - self.nu) / scipy.special.gamma(self.nu) * y**(self.nu / 2.0) * scipy.special.kv(self.nu, scipy.sqrt(y)) k[r2l2 == 0] = 1.0 return k
def login(self) -> bool:
    """Authorize the user and report whether a session was obtained.

    Fetches the login page, posts the credentials to the extracted form
    URL, runs any additional-action handling the server requests, and then
    inspects the session cookies.

    :return: True when a ``remixsid``/``remixsid6`` session cookie is
        present, otherwise False.
    """
    response = self.get(self.LOGIN_URL)
    # get_base_url presumably extracts the login form's action URL from the
    # page body -- TODO confirm against its definition.
    login_url = get_base_url(response.text)
    login_data = {'email': self._login, 'pass': self._password}
    login_response = self.post(login_url, login_data)
    url_params = get_url_params(login_response.url)
    self.check_for_additional_actions(url_params, login_response.text, login_data)
    # Fix: the original fell through and implicitly returned None on failure
    # despite the ``-> bool`` annotation; return an explicit bool instead
    # (None and False are both falsy, so callers are unaffected).
    return 'remixsid' in self.cookies or 'remixsid6' in self.cookies
Authorizes a user and returns a bool value of the result
Below is the the instruction that describes the task: ### Input: Authorizes a user and returns a bool value of the result ### Response: def login(self) -> bool: """ Authorizes a user and returns a bool value of the result """ response = self.get(self.LOGIN_URL) login_url = get_base_url(response.text) login_data = {'email': self._login, 'pass': self._password} login_response = self.post(login_url, login_data) url_params = get_url_params(login_response.url) self.check_for_additional_actions(url_params, login_response.text, login_data) if 'remixsid' in self.cookies or 'remixsid6' in self.cookies: return True
def process_top_line(self, words):
    """
    Process the line starting with "top"

    Example log: top - 00:00:02 up 32 days,  7:08, 19 users,  load average: 0.00, 0.00, 0.00

    Extracts the timestamp, total uptime in minutes, user count and the
    1/5/15-minute load averages, then stores them via put_values_into_data.
    """
    self.ts_time = words[2]
    self.ts = self.ts_date + ' ' + self.ts_time
    # Fix: dropped the stray unused local ``ts`` from the original chained
    # assignment (``self.ts = ts = ...``).
    self.ts = naarad.utils.get_standardized_timestamp(self.ts, None)
    # Mark whether this timestamp falls inside the configured window.
    self.ts_valid_lines = not self.ts_out_of_range(self.ts)
    up_days = int(words[4])
    up_hour_minute = words[6].split(':')  # E.g. '4:02,'
    up_minutes = int(up_hour_minute[0]) * 60 + int(up_hour_minute[1].split(',')[0])
    uptime_minute = up_days * 24 * 60 + up_minutes  # Converting days to minutes
    values = {}
    values['uptime_minute'] = str(uptime_minute)
    values['num_users'] = words[7]
    values['load_aver_1_minute'] = words[11][:-1]  # strip trailing comma
    values['load_aver_5_minute'] = words[12][:-1]  # strip trailing comma
    values['load_aver_15_minute'] = words[13]
    self.put_values_into_data(values)
Process the line starting with "top" Example log: top - 00:00:02 up 32 days, 7:08, 19 users, load average: 0.00, 0.00, 0.00
Below is the the instruction that describes the task: ### Input: Process the line starting with "top" Example log: top - 00:00:02 up 32 days, 7:08, 19 users, load average: 0.00, 0.00, 0.00 ### Response: def process_top_line(self, words): """ Process the line starting with "top" Example log: top - 00:00:02 up 32 days, 7:08, 19 users, load average: 0.00, 0.00, 0.00 """ self.ts_time = words[2] self.ts = self.ts_date + ' ' + self.ts_time self.ts = ts = naarad.utils.get_standardized_timestamp(self.ts, None) if self.ts_out_of_range(self.ts): self.ts_valid_lines = False else: self.ts_valid_lines = True up_days = int(words[4]) up_hour_minute = words[6].split(':') # E.g. '4:02,' up_minutes = int(up_hour_minute[0]) * 60 + int(up_hour_minute[1].split(',')[0]) uptime_minute = up_days * 24 * 60 + up_minutes # Converting days to minutes values = {} values['uptime_minute'] = str(uptime_minute) values['num_users'] = words[7] values['load_aver_1_minute'] = words[11][:-1] values['load_aver_5_minute'] = words[12][:-1] values['load_aver_15_minute'] = words[13] self.put_values_into_data(values)
def getrange(bch, fieldname):
    """Return the declared range limits (and type) for *fieldname*.

    Looks the field up in ``bch``, copies its descriptor, and returns a
    dict with 'maximum', 'minimum', 'maximum<', 'minimum>' and 'type'
    keys; numeric limits are cast to float or int according to 'type'.
    """
    keys = ['maximum', 'minimum', 'maximum<', 'minimum>', 'type']
    # Deep-copy so setdefault below never mutates the shared descriptor.
    fielddct = copy.deepcopy(bch.objidd[bch.objls.index(fieldname)])
    therange = {key: fielddct.setdefault(key, None) for key in keys}
    if therange['type']:
        therange['type'] = therange['type'][0]
    caster = {'real': float, 'integer': int}.get(therange['type'])
    if caster:
        for key in keys[:-1]:
            if therange[key]:
                therange[key] = caster(therange[key][0])
    return therange
get the ranges for this field
Below is the the instruction that describes the task: ### Input: get the ranges for this field ### Response: def getrange(bch, fieldname): """get the ranges for this field""" keys = ['maximum', 'minimum', 'maximum<', 'minimum>', 'type'] index = bch.objls.index(fieldname) fielddct_orig = bch.objidd[index] fielddct = copy.deepcopy(fielddct_orig) therange = {} for key in keys: therange[key] = fielddct.setdefault(key, None) if therange['type']: therange['type'] = therange['type'][0] if therange['type'] == 'real': for key in keys[:-1]: if therange[key]: therange[key] = float(therange[key][0]) if therange['type'] == 'integer': for key in keys[:-1]: if therange[key]: therange[key] = int(therange[key][0]) return therange
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.

    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    func = func_in
    if isinstance(func_in, functools.partial):
        func = func_in.func
        # Reject ambiguous calls where a keyword is pinned both on the
        # partial and in this call.
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    sub = r'{\hat{' + str(param_ind + 1) + '}}'
    # Credible-interval prefix: the median gets its own name, otherwise the
    # percentage is formatted without trailing zeros.
    if probability == 0.5:
        cred_prefix = r'$\mathrm{median}('
    else:
        percent = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_prefix = r'$\mathrm{C.I.}_{' + percent + r'\%}('
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + sub + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + sub + '}$',
        'param_cred': cred_prefix + r'\theta_' + sub + ')$',
        'r_cred': cred_prefix + r'|\theta|)$'}
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
Produce a latex formatted name for each function for use in labelling results. Parameters ---------- func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str Latex formatted name for the function.
Below is the the instruction that describes the task: ### Input: Produce a latex formatted name for each function for use in labelling results. Parameters ---------- func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str Latex formatted name for the function. ### Response: def get_latex_name(func_in, **kwargs): """ Produce a latex formatted name for each function for use in labelling results. Parameters ---------- func_in: function kwargs: dict, optional Kwargs for function. Returns ------- latex_name: str Latex formatted name for the function. """ if isinstance(func_in, functools.partial): func = func_in.func assert not set(func_in.keywords) & set(kwargs), ( 'kwargs={0} and func_in.keywords={1} contain repeated keys' .format(kwargs, func_in.keywords)) kwargs.update(func_in.keywords) else: func = func_in param_ind = kwargs.pop('param_ind', 0) probability = kwargs.pop('probability', 0.5) kwargs.pop('handle_indexerror', None) if kwargs: raise TypeError('Unexpected **kwargs: {0}'.format(kwargs)) ind_str = r'{\hat{' + str(param_ind + 1) + '}}' latex_name_dict = { 'count_samples': r'samples', 'logz': r'$\mathrm{log} \mathcal{Z}$', 'evidence': r'$\mathcal{Z}$', 'r_mean': r'$\overline{|\theta|}$', 'param_mean': r'$\overline{\theta_' + ind_str + '}$', 'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'} # Add credible interval names if probability == 0.5: cred_str = r'$\mathrm{median}(' else: # format percent without trailing zeros percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.') cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}(' latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$' latex_name_dict['r_cred'] = cred_str + r'|\theta|)$' try: return latex_name_dict[func.__name__] except KeyError as err: err.args = err.args + ('get_latex_name not yet set up for ' + func.__name__,) raise
def _remove_header(self, data, options): '''Remove header from data''' version_info = self._get_version_info(options['version']) header_size = version_info['header_size'] if options['flags']['timestamp']: header_size += version_info['timestamp_size'] data = data[header_size:] return data
Remove header from data
Below is the the instruction that describes the task: ### Input: Remove header from data ### Response: def _remove_header(self, data, options): '''Remove header from data''' version_info = self._get_version_info(options['version']) header_size = version_info['header_size'] if options['flags']['timestamp']: header_size += version_info['timestamp_size'] data = data[header_size:] return data
def _find_group_coordinator_id(self, group_id): """Find the broker node_id of the coordinator of the given group. Sends a FindCoordinatorRequest message to the cluster. Will block until the FindCoordinatorResponse is received. Any errors are immediately raised. :param group_id: The consumer group ID. This is typically the group name as a string. :return: The node_id of the broker that is the coordinator. """ # Note: Java may change how this is implemented in KAFKA-6791. # # TODO add support for dynamically picking version of # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest. # When I experimented with this, GroupCoordinatorResponse_v1 didn't # match GroupCoordinatorResponse_v0 and I couldn't figure out why. gc_request = GroupCoordinatorRequest[0](group_id) gc_response = self._send_request_to_node(self._client.least_loaded_node(), gc_request) # use the extra error checking in add_group_coordinator() rather than # immediately returning the group coordinator. success = self._client.cluster.add_group_coordinator(group_id, gc_response) if not success: error_type = Errors.for_code(gc_response.error_code) assert error_type is not Errors.NoError # Note: When error_type.retriable, Java will retry... see # KafkaAdminClient's handleFindCoordinatorError method raise error_type( "Could not identify group coordinator for group_id '{}' from response '{}'." .format(group_id, gc_response)) group_coordinator = self._client.cluster.coordinator_for_group(group_id) # will be None if the coordinator was never populated, which should never happen here assert group_coordinator is not None # will be -1 if add_group_coordinator() failed... but by this point the # error should have been raised. assert group_coordinator != -1 return group_coordinator
Find the broker node_id of the coordinator of the given group. Sends a FindCoordinatorRequest message to the cluster. Will block until the FindCoordinatorResponse is received. Any errors are immediately raised. :param group_id: The consumer group ID. This is typically the group name as a string. :return: The node_id of the broker that is the coordinator.
Below is the the instruction that describes the task: ### Input: Find the broker node_id of the coordinator of the given group. Sends a FindCoordinatorRequest message to the cluster. Will block until the FindCoordinatorResponse is received. Any errors are immediately raised. :param group_id: The consumer group ID. This is typically the group name as a string. :return: The node_id of the broker that is the coordinator. ### Response: def _find_group_coordinator_id(self, group_id): """Find the broker node_id of the coordinator of the given group. Sends a FindCoordinatorRequest message to the cluster. Will block until the FindCoordinatorResponse is received. Any errors are immediately raised. :param group_id: The consumer group ID. This is typically the group name as a string. :return: The node_id of the broker that is the coordinator. """ # Note: Java may change how this is implemented in KAFKA-6791. # # TODO add support for dynamically picking version of # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest. # When I experimented with this, GroupCoordinatorResponse_v1 didn't # match GroupCoordinatorResponse_v0 and I couldn't figure out why. gc_request = GroupCoordinatorRequest[0](group_id) gc_response = self._send_request_to_node(self._client.least_loaded_node(), gc_request) # use the extra error checking in add_group_coordinator() rather than # immediately returning the group coordinator. success = self._client.cluster.add_group_coordinator(group_id, gc_response) if not success: error_type = Errors.for_code(gc_response.error_code) assert error_type is not Errors.NoError # Note: When error_type.retriable, Java will retry... see # KafkaAdminClient's handleFindCoordinatorError method raise error_type( "Could not identify group coordinator for group_id '{}' from response '{}'." 
.format(group_id, gc_response)) group_coordinator = self._client.cluster.coordinator_for_group(group_id) # will be None if the coordinator was never populated, which should never happen here assert group_coordinator is not None # will be -1 if add_group_coordinator() failed... but by this point the # error should have been raised. assert group_coordinator != -1 return group_coordinator
def _(text, *args, **kwargs): """Translate and then and format the text with ``str.format``.""" msg = _t.gettext(text) if args or kwargs: return msg.format(*args, **kwargs) else: return msg
Translate and then and format the text with ``str.format``.
Below is the instruction that describes the task: ### Input: Translate and then and format the text with ``str.format``. ### Response: def _(text, *args, **kwargs): """Translate and then and format the text with ``str.format``.""" msg = _t.gettext(text) if args or kwargs: return msg.format(*args, **kwargs) else: return msg
def parse(cls, args): """ Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct """ try: (options, args) = cls.optparser.parse_args(args) if options.db_tap_id is None: raise ParseError("db_tap_id is required", cls.optparser.format_help()) if options.query is None and options.script_location is None: raise ParseError("query or script location is required", cls.optparser.format_help()) if options.script_location is not None: if options.query is not None: raise ParseError( "Both query and script_location cannot be specified", cls.optparser.format_help()) if ((options.script_location.find("s3://") != 0) and (options.script_location.find("s3n://") != 0)): # script location is local file try: q = open(options.script_location).read() except IOError as e: raise ParseError("Unable to open script location: %s" % str(e), cls.optparser.format_help()) options.script_location = None options.query = q except OptionParsingError as e: raise ParseError(e.msg, cls.optparser.format_help()) except OptionParsingExit as e: return None if options.macros is not None: options.macros = json.loads(options.macros) v = vars(options) v["command_type"] = "DbTapQueryCommand" return v
Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct
Below is the the instruction that describes the task: ### Input: Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct ### Response: def parse(cls, args): """ Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct """ try: (options, args) = cls.optparser.parse_args(args) if options.db_tap_id is None: raise ParseError("db_tap_id is required", cls.optparser.format_help()) if options.query is None and options.script_location is None: raise ParseError("query or script location is required", cls.optparser.format_help()) if options.script_location is not None: if options.query is not None: raise ParseError( "Both query and script_location cannot be specified", cls.optparser.format_help()) if ((options.script_location.find("s3://") != 0) and (options.script_location.find("s3n://") != 0)): # script location is local file try: q = open(options.script_location).read() except IOError as e: raise ParseError("Unable to open script location: %s" % str(e), cls.optparser.format_help()) options.script_location = None options.query = q except OptionParsingError as e: raise ParseError(e.msg, cls.optparser.format_help()) except OptionParsingExit as e: return None if options.macros is not None: options.macros = json.loads(options.macros) v = vars(options) v["command_type"] = "DbTapQueryCommand" return v
def unpack_ambiguous(s): """ List sequences with ambiguous characters in all possibilities. """ sd = [ambiguous_dna_values[x] for x in s] return ["".join(x) for x in list(product(*sd))]
List sequences with ambiguous characters in all possibilities.
Below is the instruction that describes the task: ### Input: List sequences with ambiguous characters in all possibilities. ### Response: def unpack_ambiguous(s): """ List sequences with ambiguous characters in all possibilities. """ sd = [ambiguous_dna_values[x] for x in s] return ["".join(x) for x in list(product(*sd))]
def _disp_width(self, pwcs, n=None): """ A wcswidth that never gives -1. Copying existing code is evil, but.. github.com/jquast/wcwidth/blob/07cea7f/wcwidth/wcwidth.py#L182-L204 """ # pylint: disable=C0103 # Invalid argument name "n" # TODO: Shall we consider things like ANSI escape seqs here? # We can implement some ignore-me segment like those wrapped by # \1 and \2 in readline too. end = len(pwcs) if n is None else n idx = slice(0, end) width = 0 for char in pwcs[idx]: width += max(0, wcwidth(char)) return width
A wcswidth that never gives -1. Copying existing code is evil, but.. github.com/jquast/wcwidth/blob/07cea7f/wcwidth/wcwidth.py#L182-L204
Below is the instruction that describes the task: ### Input: A wcswidth that never gives -1. Copying existing code is evil, but.. github.com/jquast/wcwidth/blob/07cea7f/wcwidth/wcwidth.py#L182-L204 ### Response: def _disp_width(self, pwcs, n=None): """ A wcswidth that never gives -1. Copying existing code is evil, but.. github.com/jquast/wcwidth/blob/07cea7f/wcwidth/wcwidth.py#L182-L204 """ # pylint: disable=C0103 # Invalid argument name "n" # TODO: Shall we consider things like ANSI escape seqs here? # We can implement some ignore-me segment like those wrapped by # \1 and \2 in readline too. end = len(pwcs) if n is None else n idx = slice(0, end) width = 0 for char in pwcs[idx]: width += max(0, wcwidth(char)) return width
def finalize(self): """ Finalize the run - build the name generator and use it to build the remap symbol tables. """ self.global_scope.close() name_generator = NameGenerator(skip=self.reserved_keywords) self.global_scope.build_remap_symbols( name_generator, children_only=not self.obfuscate_globals, )
Finalize the run - build the name generator and use it to build the remap symbol tables.
Below is the instruction that describes the task: ### Input: Finalize the run - build the name generator and use it to build the remap symbol tables. ### Response: def finalize(self): """ Finalize the run - build the name generator and use it to build the remap symbol tables. """ self.global_scope.close() name_generator = NameGenerator(skip=self.reserved_keywords) self.global_scope.build_remap_symbols( name_generator, children_only=not self.obfuscate_globals, )
def parse_option(self, option, block_name, *values): """ Parse duration option for timer. """ try: if len(values) != 1: raise TypeError self.total_duration = int(values[0]) if self.total_duration <= 0: raise ValueError except ValueError: pattern = u'"{0}" must be an integer > 0' raise ValueError(pattern.format(option))
Parse duration option for timer.
Below is the instruction that describes the task: ### Input: Parse duration option for timer. ### Response: def parse_option(self, option, block_name, *values): """ Parse duration option for timer. """ try: if len(values) != 1: raise TypeError self.total_duration = int(values[0]) if self.total_duration <= 0: raise ValueError except ValueError: pattern = u'"{0}" must be an integer > 0' raise ValueError(pattern.format(option))
def add_node(self, node): """Add an agent, connecting it to the previous node.""" other_nodes = [n for n in self.nodes() if n.id != node.id] if isinstance(node, Source) and other_nodes: raise(Exception("Chain network already has a nodes, " "can't add a source.")) if other_nodes: parent = max(other_nodes, key=attrgetter('creation_time')) parent.connect(whom=node)
Add an agent, connecting it to the previous node.
Below is the instruction that describes the task: ### Input: Add an agent, connecting it to the previous node. ### Response: def add_node(self, node): """Add an agent, connecting it to the previous node.""" other_nodes = [n for n in self.nodes() if n.id != node.id] if isinstance(node, Source) and other_nodes: raise(Exception("Chain network already has a nodes, " "can't add a source.")) if other_nodes: parent = max(other_nodes, key=attrgetter('creation_time')) parent.connect(whom=node)
def compression_type(self): """Return the latest compresion type used in this MAR. Returns: One of None, 'bz2', or 'xz' """ best_compression = None for e in self.mardata.index.entries: self.fileobj.seek(e.offset) magic = self.fileobj.read(10) compression = guess_compression(magic) if compression == 'xz': best_compression = 'xz' break elif compression == 'bz2' and best_compression is None: best_compression = 'bz2' return best_compression
Return the latest compresion type used in this MAR. Returns: One of None, 'bz2', or 'xz'
Below is the instruction that describes the task: ### Input: Return the latest compresion type used in this MAR. Returns: One of None, 'bz2', or 'xz' ### Response: def compression_type(self): """Return the latest compresion type used in this MAR. Returns: One of None, 'bz2', or 'xz' """ best_compression = None for e in self.mardata.index.entries: self.fileobj.seek(e.offset) magic = self.fileobj.read(10) compression = guess_compression(magic) if compression == 'xz': best_compression = 'xz' break elif compression == 'bz2' and best_compression is None: best_compression = 'bz2' return best_compression
def update_has_children(self, tree_alias, tree_items, navigation_type): """Updates 'has_children' attribute for tree items inplace. :param str|unicode tree_alias: :param list tree_items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu """ get_children = self.get_children filter_items = self.filter_items apply_hook = self.apply_hook for tree_item in tree_items: children = get_children(tree_alias, tree_item) children = filter_items(children, navigation_type) children = apply_hook(children, '%s.has_children' % navigation_type) tree_item.has_children = len(children) > 0
Updates 'has_children' attribute for tree items inplace. :param str|unicode tree_alias: :param list tree_items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu
Below is the the instruction that describes the task: ### Input: Updates 'has_children' attribute for tree items inplace. :param str|unicode tree_alias: :param list tree_items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu ### Response: def update_has_children(self, tree_alias, tree_items, navigation_type): """Updates 'has_children' attribute for tree items inplace. :param str|unicode tree_alias: :param list tree_items: :param str|unicode navigation_type: sitetree, breadcrumbs, menu """ get_children = self.get_children filter_items = self.filter_items apply_hook = self.apply_hook for tree_item in tree_items: children = get_children(tree_alias, tree_item) children = filter_items(children, navigation_type) children = apply_hook(children, '%s.has_children' % navigation_type) tree_item.has_children = len(children) > 0
def NewData(self, text): ''' Method which is called by the SAX parser upon encountering text inside a tag :param text: the text encountered :return: None, has side effects modifying the class itself ''' sint = ignore_exception(ValueError)(int) if len(self.tags) > 0: if self.tags[-1] == "beat-type" or self.tags[-1] == "beats": if sint(text) is int: self.chars[self.tags[-1]] = text if self.validateData(text): if len(self.tags) > 0: if self.tags[-1] not in self.chars: self.chars[self.tags[-1]] = text else: self.chars[self.tags[-1]] += text
Method which is called by the SAX parser upon encountering text inside a tag :param text: the text encountered :return: None, has side effects modifying the class itself
Below is the the instruction that describes the task: ### Input: Method which is called by the SAX parser upon encountering text inside a tag :param text: the text encountered :return: None, has side effects modifying the class itself ### Response: def NewData(self, text): ''' Method which is called by the SAX parser upon encountering text inside a tag :param text: the text encountered :return: None, has side effects modifying the class itself ''' sint = ignore_exception(ValueError)(int) if len(self.tags) > 0: if self.tags[-1] == "beat-type" or self.tags[-1] == "beats": if sint(text) is int: self.chars[self.tags[-1]] = text if self.validateData(text): if len(self.tags) > 0: if self.tags[-1] not in self.chars: self.chars[self.tags[-1]] = text else: self.chars[self.tags[-1]] += text
def write(self, data): """Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param data: data to be written. :type data: bytes :return: Number of bytes actually transferred, return value of the library call. :rtype: (int, VISAStatus) """ send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN) count = self.interface.write(data) return count, StatusCode.success
Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param data: data to be written. :type data: bytes :return: Number of bytes actually transferred, return value of the library call. :rtype: (int, VISAStatus)
Below is the instruction that describes the task: ### Input: Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param data: data to be written. :type data: bytes :return: Number of bytes actually transferred, return value of the library call. :rtype: (int, VISAStatus) ### Response: def write(self, data): """Writes data to device or interface synchronously. Corresponds to viWrite function of the VISA library. :param data: data to be written. :type data: bytes :return: Number of bytes actually transferred, return value of the library call. :rtype: (int, VISAStatus) """ send_end, _ = self.get_attribute(constants.VI_ATTR_SEND_END_EN) count = self.interface.write(data) return count, StatusCode.success
def relStdDev(self, limit=None): """return the relative standard deviation optionally limited to the last limit values""" moments = self.meanAndStdDev(limit) if moments is None: return None return moments[1] / moments[0]
return the relative standard deviation optionally limited to the last limit values
Below is the instruction that describes the task: ### Input: return the relative standard deviation optionally limited to the last limit values ### Response: def relStdDev(self, limit=None): """return the relative standard deviation optionally limited to the last limit values""" moments = self.meanAndStdDev(limit) if moments is None: return None return moments[1] / moments[0]
def get_attribute_type(o_attr): ''' Get the base data type (S_DT) associated with a BridgePoint attribute. ''' ref_o_attr = one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]() if ref_o_attr: return get_attribute_type(ref_o_attr) else: return one(o_attr).S_DT[114]()
Get the base data type (S_DT) associated with a BridgePoint attribute.
Below is the instruction that describes the task: ### Input: Get the base data type (S_DT) associated with a BridgePoint attribute. ### Response: def get_attribute_type(o_attr): ''' Get the base data type (S_DT) associated with a BridgePoint attribute. ''' ref_o_attr = one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]() if ref_o_attr: return get_attribute_type(ref_o_attr) else: return one(o_attr).S_DT[114]()
def get_endpoints_using_catalog_api(domain, token): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API. """ # Token required for all requests. Providing login info instead is also possible but I didn't implement it. headers = {"X-App-Token": token} # The API will return only 100 requests at a time by default. We can ask for more, but the server seems to start # to lag after a certain N requested. Instead, let's pick a less conservative pagination limit and spool up with # offsets. # # At the time this library was written, Socrata would return all of its results in a contiguous list. Once you # maxed out, you wouldn't get any more list items. Later on this was changed so that now if you exhaust portal # entities, it will actually take you back to the beginning of the list again! # # As a result we need to perform our own set-wise check to make sure that what we get isn't just a bit of the # same list all over again. 
uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000" ret = [] endpoints_thus_far = set() offset = 0 while True: try: r = requests.get(uri.format(domain, offset), headers=headers) r.raise_for_status() except requests.HTTPError: raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain)) data = r.json() endpoints_returned = {r['resource']['id'] for r in data['results']} new_endpoints = endpoints_returned.difference(endpoints_thus_far) if len(new_endpoints) >= 999: # we are continuing to stream # TODO: 999 not 1000 b/c the API suffers off-by-one errors. Can also do worse, however. Compensate? # cf. https://github.com/ResidentMario/pysocrata/issues/1 ret += data['results'] endpoints_thus_far.update(new_endpoints) offset += 1000 continue else: # we are ending on a stream with some old endpoints on it ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints] break return ret
Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API.
Below is the the instruction that describes the task: ### Input: Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API. ### Response: def get_endpoints_using_catalog_api(domain, token): """ Implements a raw HTTP GET against the entire Socrata portal for the domain in question. This method uses the second of the two ways of getting this information, the catalog API. Parameters ---------- domain: str A Socrata data portal domain. "data.seattle.gov" or "data.cityofnewyork.us" for example. token: str A Socrata application token. Application tokens can be registered by going onto the Socrata portal in question, creating an account, logging in, going to developer tools, and spawning a token. Returns ------- Portal dataset metadata from the catalog API. """ # Token required for all requests. Providing login info instead is also possible but I didn't implement it. headers = {"X-App-Token": token} # The API will return only 100 requests at a time by default. We can ask for more, but the server seems to start # to lag after a certain N requested. Instead, let's pick a less conservative pagination limit and spool up with # offsets. # # At the time this library was written, Socrata would return all of its results in a contiguous list. Once you # maxed out, you wouldn't get any more list items. Later on this was changed so that now if you exhaust portal # entities, it will actually take you back to the beginning of the list again! 
# # As a result we need to perform our own set-wise check to make sure that what we get isn't just a bit of the # same list all over again. uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000" ret = [] endpoints_thus_far = set() offset = 0 while True: try: r = requests.get(uri.format(domain, offset), headers=headers) r.raise_for_status() except requests.HTTPError: raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain)) data = r.json() endpoints_returned = {r['resource']['id'] for r in data['results']} new_endpoints = endpoints_returned.difference(endpoints_thus_far) if len(new_endpoints) >= 999: # we are continuing to stream # TODO: 999 not 1000 b/c the API suffers off-by-one errors. Can also do worse, however. Compensate? # cf. https://github.com/ResidentMario/pysocrata/issues/1 ret += data['results'] endpoints_thus_far.update(new_endpoints) offset += 1000 continue else: # we are ending on a stream with some old endpoints on it ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints] break return ret
def pack_field(self, value): """ Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes """ if isinstance(value, str): return self.pack_str(value) elif isinstance(value, unicode): return self.pack_unicode(value, self.charset, self.errors) elif isinstance(value, int): return self.pack_int(value) elif isinstance(value, long): return self.pack_long(value) else: raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes
Below is the instruction that describes the task: ### Input: Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes ### Response: def pack_field(self, value): """ Pack single field (string or integer value) <field> ::= <int32_varint><data> :param value: value to be packed :type value: bytes, str, int or long :return: packed value :rtype: bytes """ if isinstance(value, str): return self.pack_str(value) elif isinstance(value, unicode): return self.pack_unicode(value, self.charset, self.errors) elif isinstance(value, int): return self.pack_int(value) elif isinstance(value, long): return self.pack_long(value) else: raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
def create_manifest_table(dynamodb_client, table_name): """Create DynamoDB table for run manifests Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name """ try: dynamodb_client.create_table( AttributeDefinitions=[ { 'AttributeName': DYNAMODB_RUNID_ATTRIBUTE, 'AttributeType': 'S' }, ], TableName=table_name, KeySchema=[ { 'AttributeName': DYNAMODB_RUNID_ATTRIBUTE, 'KeyType': 'HASH' }, ], ProvisionedThroughput={ 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 } ) dynamodb_client.get_waiter('table_exists').wait(TableName=table_name) except ClientError as e: # Table already exists if e.response['Error']['Code'] == 'ResourceInUseException': pass else: raise e
Create DynamoDB table for run manifests Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name
Below is the the instruction that describes the task: ### Input: Create DynamoDB table for run manifests Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name ### Response: def create_manifest_table(dynamodb_client, table_name): """Create DynamoDB table for run manifests Arguments: dynamodb_client - boto3 DynamoDB client (not service) table_name - string representing existing table name """ try: dynamodb_client.create_table( AttributeDefinitions=[ { 'AttributeName': DYNAMODB_RUNID_ATTRIBUTE, 'AttributeType': 'S' }, ], TableName=table_name, KeySchema=[ { 'AttributeName': DYNAMODB_RUNID_ATTRIBUTE, 'KeyType': 'HASH' }, ], ProvisionedThroughput={ 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 } ) dynamodb_client.get_waiter('table_exists').wait(TableName=table_name) except ClientError as e: # Table already exists if e.response['Error']['Code'] == 'ResourceInUseException': pass else: raise e
def edges(s, edges, alpha=1.0, weighted=False, directed=False): """ Visualization of the edges in a network. """ p = s._ctx.BezierPath() if directed and s.stroke: pd = s._ctx.BezierPath() if weighted and s.fill: pw = [s._ctx.BezierPath() for i in range(11)] # Draw the edges in a single BezierPath for speed. # Weighted edges are divided into ten BezierPaths, # depending on their weight rounded between 0 and 10. if len(edges) == 0: return for e in edges: try: s2 = e.node1.graph.styles[e.node1.style] except: s2 = s if s2.edge: s2.edge(s2, p, e, alpha) if directed and s.stroke: s2.edge_arrow(s2, pd, e, radius=10) if weighted and s.fill: s2.edge(s2, pw[int(e.weight*10)], e, alpha) s._ctx.autoclosepath(False) s._ctx.nofill() s._ctx.nostroke() # All weighted edges use the default fill. if weighted and s.fill: r = e.node1.__class__(None).r s._ctx.stroke( s.fill.r, s.fill.g, s.fill.b, s.fill.a * 0.65 * alpha ) for w in range(1, len(pw)): s._ctx.strokewidth(r*w*0.1) s._ctx.drawpath(pw[w].copy()) # All edges use the default stroke. if s.stroke: s._ctx.strokewidth(s.strokewidth) s._ctx.stroke( s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a * 0.65 * alpha ) s._ctx.drawpath(p.copy()) if directed and s.stroke: #clr = s._ctx.stroke().copy() clr=s._ctx.color( s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a * 0.65 * alpha ) clr.a *= 1.3 s._ctx.stroke(clr) s._ctx.drawpath(pd.copy()) for e in edges: try: s2 = self.styles[e.node1.style] except: s2 = s if s2.edge_label: s2.edge_label(s2, e, alpha)
Visualization of the edges in a network.
Below is the the instruction that describes the task: ### Input: Visualization of the edges in a network. ### Response: def edges(s, edges, alpha=1.0, weighted=False, directed=False): """ Visualization of the edges in a network. """ p = s._ctx.BezierPath() if directed and s.stroke: pd = s._ctx.BezierPath() if weighted and s.fill: pw = [s._ctx.BezierPath() for i in range(11)] # Draw the edges in a single BezierPath for speed. # Weighted edges are divided into ten BezierPaths, # depending on their weight rounded between 0 and 10. if len(edges) == 0: return for e in edges: try: s2 = e.node1.graph.styles[e.node1.style] except: s2 = s if s2.edge: s2.edge(s2, p, e, alpha) if directed and s.stroke: s2.edge_arrow(s2, pd, e, radius=10) if weighted and s.fill: s2.edge(s2, pw[int(e.weight*10)], e, alpha) s._ctx.autoclosepath(False) s._ctx.nofill() s._ctx.nostroke() # All weighted edges use the default fill. if weighted and s.fill: r = e.node1.__class__(None).r s._ctx.stroke( s.fill.r, s.fill.g, s.fill.b, s.fill.a * 0.65 * alpha ) for w in range(1, len(pw)): s._ctx.strokewidth(r*w*0.1) s._ctx.drawpath(pw[w].copy()) # All edges use the default stroke. if s.stroke: s._ctx.strokewidth(s.strokewidth) s._ctx.stroke( s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a * 0.65 * alpha ) s._ctx.drawpath(p.copy()) if directed and s.stroke: #clr = s._ctx.stroke().copy() clr=s._ctx.color( s.stroke.r, s.stroke.g, s.stroke.b, s.stroke.a * 0.65 * alpha ) clr.a *= 1.3 s._ctx.stroke(clr) s._ctx.drawpath(pd.copy()) for e in edges: try: s2 = self.styles[e.node1.style] except: s2 = s if s2.edge_label: s2.edge_label(s2, e, alpha)
def republish(self, hash, count=None, sources=None, destinations=None): """ Rebroadcast blocks starting at **hash** to the network :param hash: Hash of block to start rebroadcasting from :type hash: str :param count: Max number of blocks to rebroadcast :type count: int :param sources: If set, additionally rebroadcasts source chain blocks for receive/open up to **sources** depth :type sources: int :param destinations: If set, additionally rebroadcasts destination chain blocks for receive/open up to **destinations** depth :type destinations: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.republish( ... hash="991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948" ... ) [ "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293" ] """ hash = self._process_value(hash, 'block') payload = {"hash": hash} if count is not None: payload['count'] = self._process_value(count, 'int') if sources is not None: payload['sources'] = self._process_value(sources, 'int') if destinations is not None: payload['destinations'] = self._process_value(destinations, 'int') resp = self.call('republish', payload) return resp.get('blocks') or []
Rebroadcast blocks starting at **hash** to the network :param hash: Hash of block to start rebroadcasting from :type hash: str :param count: Max number of blocks to rebroadcast :type count: int :param sources: If set, additionally rebroadcasts source chain blocks for receive/open up to **sources** depth :type sources: int :param destinations: If set, additionally rebroadcasts destination chain blocks for receive/open up to **destinations** depth :type destinations: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.republish( ... hash="991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948" ... ) [ "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293" ]
Below is the the instruction that describes the task: ### Input: Rebroadcast blocks starting at **hash** to the network :param hash: Hash of block to start rebroadcasting from :type hash: str :param count: Max number of blocks to rebroadcast :type count: int :param sources: If set, additionally rebroadcasts source chain blocks for receive/open up to **sources** depth :type sources: int :param destinations: If set, additionally rebroadcasts destination chain blocks for receive/open up to **destinations** depth :type destinations: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.republish( ... hash="991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948" ... ) [ "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293" ] ### Response: def republish(self, hash, count=None, sources=None, destinations=None): """ Rebroadcast blocks starting at **hash** to the network :param hash: Hash of block to start rebroadcasting from :type hash: str :param count: Max number of blocks to rebroadcast :type count: int :param sources: If set, additionally rebroadcasts source chain blocks for receive/open up to **sources** depth :type sources: int :param destinations: If set, additionally rebroadcasts destination chain blocks for receive/open up to **destinations** depth :type destinations: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.republish( ... hash="991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948" ... 
) [ "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948", "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293" ] """ hash = self._process_value(hash, 'block') payload = {"hash": hash} if count is not None: payload['count'] = self._process_value(count, 'int') if sources is not None: payload['sources'] = self._process_value(sources, 'int') if destinations is not None: payload['destinations'] = self._process_value(destinations, 'int') resp = self.call('republish', payload) return resp.get('blocks') or []
def board(self, *, _cache: bool = False) -> chess.Board:
    """
    Return the starting position of the game.

    Unless the ``FEN`` header tag is set, this is the default starting
    position (for the ``Variant``).
    """
    # All of the FEN/Variant handling lives in the headers object; the
    # ``_cache`` flag is accepted for interface compatibility but is not
    # used by this implementation.
    starting_position = self.headers.board()
    return starting_position
Gets the starting position of the game. Unless the ``FEN`` header tag is set, this is the default starting position (for the ``Variant``).
Below is the instruction that describes the task: ### Input: Gets the starting position of the game. Unless the ``FEN`` header tag is set, this is the default starting position (for the ``Variant``). ### Response: def board(self, *, _cache: bool = False) -> chess.Board: """ Gets the starting position of the game. Unless the ``FEN`` header tag is set, this is the default starting position (for the ``Variant``). """ return self.headers.board()
def ltrimboth (l,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores).  Assumes list is sorted by magnitude.
    Slices off LESS if proportion results in a non-integer slice index
    (i.e., conservatively slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    # int() truncates toward zero, so we never remove more than the
    # requested proportion from either end.
    n_cut = int(len(l) * proportiontocut)
    return l[n_cut:len(l) - n_cut]
Slices off the passed proportion of items from BOTH ends of the passed list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost' 10% of scores. Assumes list is sorted by magnitude. Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: ltrimboth (l,proportiontocut) Returns: trimmed version of list l
Below is the the instruction that describes the task: ### Input: Slices off the passed proportion of items from BOTH ends of the passed list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost' 10% of scores. Assumes list is sorted by magnitude. Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: ltrimboth (l,proportiontocut) Returns: trimmed version of list l ### Response: def ltrimboth (l,proportiontocut): """ Slices off the passed proportion of items from BOTH ends of the passed list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost' 10% of scores. Assumes list is sorted by magnitude. Slices off LESS if proportion results in a non-integer slice index (i.e., conservatively slices off proportiontocut). Usage: ltrimboth (l,proportiontocut) Returns: trimmed version of list l """ lowercut = int(proportiontocut*len(l)) uppercut = len(l) - lowercut return l[lowercut:uppercut]
def call(self, lowstrike, highstrike, expiry):
    """
    Metrics for evaluating a bull call spread.

    The metrics returned can easily be translated into bear
    spread metrics. The difference is only whether one buys
    the call at the lower strike and sells at the higher
    (bull call spread) or sells at the lower while buying at the higher
    (bear call spread). The metrics dataframe shows values for
    a bull call spread, where the transaction is a debit. A bear
    call spread is created by *selling* a bull call spread, so
    the transaction amount is the same, but it is a credit rather
    than a debit.

    Parameters
    ------------
    lowstrike : numeric
        Lower strike price.

    highstrike : numeric
        Higher strike price.

    expiry : date or date str (e.g. '2015-01-01')
        Expiration date.

    Returns
    ------------
    metrics : DataFrame

    Notes
    -----
    Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed.,
    pp. 157ff.

    See Also
    --------
    :meth:`put`
    """
    assert lowstrike < highstrike
    _rows = {}
    _prices = {}
    _opttype = 'call'
    # Look up the option row and its quoted price at each leg of the spread.
    for _strike in (lowstrike, highstrike):
        _rows[_strike] = _relevant_rows(self.data, (_strike, expiry, _opttype,),
                "No key for {} strike {} {}".format(expiry, _strike, _opttype))
        _prices[_strike] = _getprice(_rows[_strike])
    _eq = _rows[lowstrike].loc[:, 'Underlying_Price'].values[0]
    _qt = _rows[lowstrike].loc[:, 'Quote_Time'].values[0]
    # Net cost of the spread: buy the low strike, sell the high strike.
    _debit = _prices[lowstrike] - _prices[highstrike]
    _breakeven = lowstrike + _debit
    # A break-even above the high strike is unreachable for this spread,
    # so report it as NaN.
    if _breakeven > highstrike:
        _breakeven = np.nan
    _maxprof = highstrike - lowstrike - _debit
    _index = ['Low Strike Call', 'High Strike Call', 'Debit', 'Break_Even',
            'Max Profit', 'Underlying_Price', 'Quote_Time']
    _vals = np.array([_prices[lowstrike], _prices[highstrike], _debit,
            _breakeven, _maxprof, _eq, _qt])
    return pd.DataFrame(_vals, index=_index, columns=['Value'])
Metrics for evaluating a bull call spread. The metrics returned can easily be translated into bear spread metrics. The difference is only whether one buys the call at the lower strike and sells at the higher (bull call spread) or sells at the lower while buying at the higher (bear call spread). The metrics dataframe shows values for a bull call spread, where the transaction is a debit. A bear call spread is created by *selling* a bull call spread, so the transaction amount is the same, but it is a credit rather than a debit. Parameters ------------ lowstrike : numeric Lower strike price. highstrike : numeric Higher strike price. expiry : date or date str (e.g. '2015-01-01') Expiration date. Returns ------------ metrics : DataFrame Notes ----- Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., pp. 157ff. See Also -------- :meth:`put`
Below is the the instruction that describes the task: ### Input: Metrics for evaluating a bull call spread. The metrics returned can easily be translated into bear spread metrics. The difference is only whether one buys the call at the lower strike and sells at the higher (bull call spread) or sells at the lower while buying at the higher (bear call spread). The metrics dataframe shows values for a bull call spread, where the transaction is a debit. A bear call spread is created by *selling* a bull call spread, so the transaction amount is the same, but it is a credit rather than a debit. Parameters ------------ lowstrike : numeric Lower strike price. highstrike : numeric Higher strike sprice. expiry : date or date str (e.g. '2015-01-01') Expiration date. Returns ------------ metrics : DataFrame Notes ----- Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., pp. 157ff. See Also -------- :meth:`put` ### Response: def call(self, lowstrike, highstrike, expiry): """ Metrics for evaluating a bull call spread. The metrics returned can easily be translated into bear spread metrics. The difference is only whether one buys the call at the lower strike and sells at the higher (bull call spread) or sells at the lower while buying at the higher (bear call spread). The metrics dataframe shows values for a bull call spread, where the transaction is a debit. A bear call spread is created by *selling* a bull call spread, so the transaction amount is the same, but it is a credit rather than a debit. Parameters ------------ lowstrike : numeric Lower strike price. highstrike : numeric Higher strike sprice. expiry : date or date str (e.g. '2015-01-01') Expiration date. Returns ------------ metrics : DataFrame Notes ----- Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., pp. 157ff. 
See Also -------- :meth:`put` """ assert lowstrike < highstrike _rows = {} _prices = {} _opttype = 'call' for _strike in (lowstrike, highstrike): _rows[_strike] = _relevant_rows(self.data, (_strike, expiry, _opttype,), "No key for {} strike {} {}".format(expiry, _strike, _opttype)) _prices[_strike] = _getprice(_rows[_strike]) _eq = _rows[lowstrike].loc[:, 'Underlying_Price'].values[0] _qt = _rows[lowstrike].loc[:, 'Quote_Time'].values[0] _debit = _prices[lowstrike] - _prices[highstrike] _breakeven = lowstrike + _debit if _breakeven > highstrike: _breakeven = np.nan _maxprof = highstrike - lowstrike -_debit _index = ['Low Strike Call', 'High Strike Call', 'Debit', 'Break_Even', 'Max Profit', 'Underlying_Price', 'Quote_Time'] _vals = np.array([_prices[lowstrike], _prices[highstrike], _debit, _breakeven, _maxprof, _eq, _qt]) return pd.DataFrame(_vals, index=_index, columns=['Value'])
def get_draft_version(self, expand=None):
    """
    Get the current draft version of this layer.

    :param expand: optional list of attribute names to expand in the
        response; defaults to no expansion.
    :raises NotFound: if there is no draft version.
    """
    # A mutable default argument ([]) would be shared across all calls;
    # use None as the sentinel and substitute a fresh list instead.
    if expand is None:
        expand = []
    target_url = self._client.get_url('VERSION', 'GET', 'draft',
                                      {'layer_id': self.id})
    return self._manager._get(target_url, expand=expand)
Get the current draft version of this layer. :raises NotFound: if there is no draft version.
Below is the the instruction that describes the task: ### Input: Get the current draft version of this layer. :raises NotFound: if there is no draft version. ### Response: def get_draft_version(self, expand=[]): """ Get the current draft version of this layer. :raises NotFound: if there is no draft version. """ target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id}) return self._manager._get(target_url, expand=expand)
def check_messages(*messages: str) -> Callable:
    """Decorator that records which messages a checker method handles."""
    def _attach(method):
        # The checker framework later reads ``checks_msgs`` to know which
        # message ids this callback is responsible for.
        method.checks_msgs = messages
        return method
    return _attach
decorator to store messages that are handled by a checker method
Below is the instruction that describes the task: ### Input: decorator to store messages that are handled by a checker method ### Response: def check_messages(*messages: str) -> Callable: """decorator to store messages that are handled by a checker method""" def store_messages(func): func.checks_msgs = messages return func return store_messages
def _ReadPartitionKeyRanges(self, collection_link, feed_options=None): """Reads Partition Key Ranges. :param str collection_link: The link to the document collection. :param dict feed_options: :return: Query Iterable of PartitionKeyRanges. :rtype: query_iterable.QueryIterable """ if feed_options is None: feed_options = {} return self._QueryPartitionKeyRanges(collection_link, None, feed_options)
Reads Partition Key Ranges. :param str collection_link: The link to the document collection. :param dict feed_options: :return: Query Iterable of PartitionKeyRanges. :rtype: query_iterable.QueryIterable
Below is the the instruction that describes the task: ### Input: Reads Partition Key Ranges. :param str collection_link: The link to the document collection. :param dict feed_options: :return: Query Iterable of PartitionKeyRanges. :rtype: query_iterable.QueryIterable ### Response: def _ReadPartitionKeyRanges(self, collection_link, feed_options=None): """Reads Partition Key Ranges. :param str collection_link: The link to the document collection. :param dict feed_options: :return: Query Iterable of PartitionKeyRanges. :rtype: query_iterable.QueryIterable """ if feed_options is None: feed_options = {} return self._QueryPartitionKeyRanges(collection_link, None, feed_options)
def _close(self, fd): """ Removes a file descriptor from the file descriptor list :rtype: int :param fd: the file descriptor to close. :return: C{0} on success. """ try: self.files[fd].close() self._closed_files.append(self.files[fd]) # Keep track for SymbolicFile testcase generation self.files[fd] = None except IndexError: raise FdError(f"Bad file descriptor ({fd})")
Removes a file descriptor from the file descriptor list :rtype: int :param fd: the file descriptor to close. :return: C{0} on success.
Below is the the instruction that describes the task: ### Input: Removes a file descriptor from the file descriptor list :rtype: int :param fd: the file descriptor to close. :return: C{0} on success. ### Response: def _close(self, fd): """ Removes a file descriptor from the file descriptor list :rtype: int :param fd: the file descriptor to close. :return: C{0} on success. """ try: self.files[fd].close() self._closed_files.append(self.files[fd]) # Keep track for SymbolicFile testcase generation self.files[fd] = None except IndexError: raise FdError(f"Bad file descriptor ({fd})")
def ok(self, data, schema=None, envelope=None):
    """
    Gets a 200 response with the specified data.

    :param data: The content value.
    :param schema: The schema to serialize the data.
    :param envelope: The key used to envelope the data.
    :return: A Flask response object.
    """
    serialized = marshal(data, schema, envelope)
    return self.__make_response(serialized)
Gets a 200 response with the specified data. :param data: The content value. :param schema: The schema to serialize the data. :param envelope: The key used to envelope the data. :return: A Flask response object.
Below is the the instruction that describes the task: ### Input: Gets a 200 response with the specified data. :param data: The content value. :param schema: The schema to serialize the data. :param envelope: The key used to envelope the data. :return: A Flask response object. ### Response: def ok(self, data, schema=None, envelope=None): """ Gets a 200 response with the specified data. :param data: The content value. :param schema: The schema to serialize the data. :param envelope: The key used to envelope the data. :return: A Flask response object. """ data = marshal(data, schema, envelope) return self.__make_response(data)
def extract_ref(scihdu, refhdu):
    """Extract section of the reference image that corresponds to the
    given science image.

    This only returns a view, not a copy of the reference image's array.

    Parameters
    ----------
    scihdu, refhdu : obj
        Extension HDU's of the science and reference image, respectively.

    Returns
    -------
    refdata : array-like
        Section of the relevant reference image.

    Raises
    ------
    NotImplementedError
        Either science or reference data are binned.

    ValueError
        Extracted section size mismatch.

    """
    same_size, bin_x, bin_y, col0, row0 = find_line(scihdu, refhdu)

    # Dimensions already match, so the whole reference array applies.
    if same_size:
        return refhdu.data

    # Binned data are not supported.
    if bin_x != 1 or bin_y != 1:
        raise NotImplementedError(
            'Either science or reference data are binned')

    # Take a zero-copy view of the matching sub-section.
    n_rows, n_cols = scihdu.data.shape
    section = refhdu.data[row0:row0 + n_rows, col0:col0 + n_cols]

    if section.shape != (n_rows, n_cols):
        raise ValueError('Extracted reference image is {0} but science image '
                         'is {1}'.format(section.shape, (n_rows, n_cols)))

    return section
Extract section of the reference image that corresponds to the given science image. This only returns a view, not a copy of the reference image's array. Parameters ---------- scihdu, refhdu : obj Extension HDU's of the science and reference image, respectively. Returns ------- refdata : array-like Section of the relevant reference image. Raises ------ NotImplementedError Either science or reference data are binned. ValueError Extracted section size mismatch.
Below is the the instruction that describes the task: ### Input: Extract section of the reference image that corresponds to the given science image. This only returns a view, not a copy of the reference image's array. Parameters ---------- scihdu, refhdu : obj Extension HDU's of the science and reference image, respectively. Returns ------- refdata : array-like Section of the relevant reference image. Raises ------ NotImplementedError Either science or reference data are binned. ValueError Extracted section size mismatch. ### Response: def extract_ref(scihdu, refhdu): """Extract section of the reference image that corresponds to the given science image. This only returns a view, not a copy of the reference image's array. Parameters ---------- scihdu, refhdu : obj Extension HDU's of the science and reference image, respectively. Returns ------- refdata : array-like Section of the relevant reference image. Raises ------ NotImplementedError Either science or reference data are binned. ValueError Extracted section size mismatch. """ same_size, rx, ry, x0, y0 = find_line(scihdu, refhdu) # Use the whole reference image if same_size: return refhdu.data # Binned data if rx != 1 or ry != 1: raise NotImplementedError( 'Either science or reference data are binned') # Extract a view of the sub-section ny, nx = scihdu.data.shape refdata = refhdu.data[y0:y0+ny, x0:x0+nx] if refdata.shape != (ny, nx): raise ValueError('Extracted reference image is {0} but science image ' 'is {1}'.format(refdata.shape, (ny, nx))) return refdata
def set_args(self, arguments):
    """
    Setup the command line arguments, the first item must be an (absolute) filename
    to run.
    """
    # Convert the Python sequence into a zlist reference the C API expects.
    args_ref = byref(zlist_p.from_param(arguments))
    return lib.zproc_set_args(self._as_parameter_, args_ref)
Setup the command line arguments, the first item must be an (absolute) filename to run.
Below is the the instruction that describes the task: ### Input: Setup the command line arguments, the first item must be an (absolute) filename to run. ### Response: def set_args(self, arguments): """ Setup the command line arguments, the first item must be an (absolute) filename to run. """ return lib.zproc_set_args(self._as_parameter_, byref(zlist_p.from_param(arguments)))
def get_user_home(self, user):
    """Returns the default URL for a particular user.

    This method can be used to customize where a user is sent when
    they log in, etc. By default it returns the value of
    :meth:`get_absolute_url`.

    An alternative function can be supplied to customize this behavior
    by specifying a either a URL or a function which returns a URL via
    the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these would
    be valid::

        {"user_home": "/home",}  # A URL
        {"user_home": "my_module.get_user_home",}  # Path to a function
        {"user_home": lambda user: "/" + user.name,}  # A function
        {"user_home": None,}  # Will always return the default dashboard

    This can be useful if the default dashboard may not be accessible
    to all users. When user_home is missing from HORIZON_CONFIG,
    it will default to the settings.LOGIN_REDIRECT_URL value.
    """
    user_home = self._conf['user_home']
    if not user_home:
        return self.get_absolute_url()
    if callable(user_home):
        return user_home(user)
    if isinstance(user_home, six.string_types):
        # Assume we've got a URL if there's a slash in it
        if '/' in user_home:
            return user_home
        # Otherwise treat it as a dotted path to a callable.
        mod, func = user_home.rsplit(".", 1)
        return getattr(import_module(mod), func)(user)
    # If it's not callable and not a string, it's wrong.
    raise ValueError('The user_home setting must be either a string '
                     'or a callable object (e.g. a function).')
Returns the default URL for a particular user. This method can be used to customize where a user is sent when they log in, etc. By default it returns the value of :meth:`get_absolute_url`. An alternative function can be supplied to customize this behavior by specifying a either a URL or a function which returns a URL via the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these would be valid:: {"user_home": "/home",} # A URL {"user_home": "my_module.get_user_home",} # Path to a function {"user_home": lambda user: "/" + user.name,} # A function {"user_home": None,} # Will always return the default dashboard This can be useful if the default dashboard may not be accessible to all users. When user_home is missing from HORIZON_CONFIG, it will default to the settings.LOGIN_REDIRECT_URL value.
Below is the the instruction that describes the task: ### Input: Returns the default URL for a particular user. This method can be used to customize where a user is sent when they log in, etc. By default it returns the value of :meth:`get_absolute_url`. An alternative function can be supplied to customize this behavior by specifying a either a URL or a function which returns a URL via the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these would be valid:: {"user_home": "/home",} # A URL {"user_home": "my_module.get_user_home",} # Path to a function {"user_home": lambda user: "/" + user.name,} # A function {"user_home": None,} # Will always return the default dashboard This can be useful if the default dashboard may not be accessible to all users. When user_home is missing from HORIZON_CONFIG, it will default to the settings.LOGIN_REDIRECT_URL value. ### Response: def get_user_home(self, user): """Returns the default URL for a particular user. This method can be used to customize where a user is sent when they log in, etc. By default it returns the value of :meth:`get_absolute_url`. An alternative function can be supplied to customize this behavior by specifying a either a URL or a function which returns a URL via the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these would be valid:: {"user_home": "/home",} # A URL {"user_home": "my_module.get_user_home",} # Path to a function {"user_home": lambda user: "/" + user.name,} # A function {"user_home": None,} # Will always return the default dashboard This can be useful if the default dashboard may not be accessible to all users. When user_home is missing from HORIZON_CONFIG, it will default to the settings.LOGIN_REDIRECT_URL value. 
""" user_home = self._conf['user_home'] if user_home: if callable(user_home): return user_home(user) elif isinstance(user_home, six.string_types): # Assume we've got a URL if there's a slash in it if '/' in user_home: return user_home else: mod, func = user_home.rsplit(".", 1) return getattr(import_module(mod), func)(user) # If it's not callable and not a string, it's wrong. raise ValueError('The user_home setting must be either a string ' 'or a callable object (e.g. a function).') else: return self.get_absolute_url()
def handle_offchain_secretreveal(
        mediator_state: MediatorTransferState,
        mediator_state_change: ReceiveSecretReveal,
        channelidentifiers_to_channels: ChannelMap,
        pseudo_random_generator: random.Random,
        block_number: BlockNumber,
        block_hash: BlockHash,
) -> TransitionResult[MediatorTransferState]:
    """ Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary.

    The reveal is acted upon only when all of the following hold: the
    reveal is valid for this transfer's secrethash (as judged by
    ``is_valid_secret_reveal``), the secret was not already known, and
    the payer transfer has not expired.
    """
    is_valid_reveal = is_valid_secret_reveal(
        state_change=mediator_state_change,
        transfer_secrethash=mediator_state.secrethash,
        secret=mediator_state_change.secret,
    )
    is_secret_unknown = mediator_state.secret is None

    # a SecretReveal should be rejected if the payer transfer
    # has expired. To check for this, we use the last
    # transfer pair.
    transfer_pair = mediator_state.transfers_pair[-1]
    payer_transfer = transfer_pair.payer_transfer
    channel_identifier = payer_transfer.balance_proof.channel_identifier
    payer_channel = channelidentifiers_to_channels.get(channel_identifier)
    if not payer_channel:
        # Unknown payer channel: keep state unchanged, emit no events.
        return TransitionResult(mediator_state, list())

    has_payer_transfer_expired = channel.is_transfer_expired(
        transfer=transfer_pair.payer_transfer,
        affected_channel=payer_channel,
        block_number=block_number,
    )

    if is_secret_unknown and is_valid_reveal and not has_payer_transfer_expired:
        iteration = secret_learned(
            state=mediator_state,
            channelidentifiers_to_channels=channelidentifiers_to_channels,
            pseudo_random_generator=pseudo_random_generator,
            block_number=block_number,
            block_hash=block_hash,
            secret=mediator_state_change.secret,
            secrethash=mediator_state_change.secrethash,
            payee_address=mediator_state_change.sender,
        )
    else:
        # Invalid, duplicate, or late reveal: no state change, no events.
        iteration = TransitionResult(mediator_state, list())

    return iteration
Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary.
Below is the the instruction that describes the task: ### Input: Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. ### Response: def handle_offchain_secretreveal( mediator_state: MediatorTransferState, mediator_state_change: ReceiveSecretReveal, channelidentifiers_to_channels: ChannelMap, pseudo_random_generator: random.Random, block_number: BlockNumber, block_hash: BlockHash, ) -> TransitionResult[MediatorTransferState]: """ Handles the secret reveal and sends SendBalanceProof/RevealSecret if necessary. """ is_valid_reveal = is_valid_secret_reveal( state_change=mediator_state_change, transfer_secrethash=mediator_state.secrethash, secret=mediator_state_change.secret, ) is_secret_unknown = mediator_state.secret is None # a SecretReveal should be rejected if the payer transfer # has expired. To check for this, we use the last # transfer pair. transfer_pair = mediator_state.transfers_pair[-1] payer_transfer = transfer_pair.payer_transfer channel_identifier = payer_transfer.balance_proof.channel_identifier payer_channel = channelidentifiers_to_channels.get(channel_identifier) if not payer_channel: return TransitionResult(mediator_state, list()) has_payer_transfer_expired = channel.is_transfer_expired( transfer=transfer_pair.payer_transfer, affected_channel=payer_channel, block_number=block_number, ) if is_secret_unknown and is_valid_reveal and not has_payer_transfer_expired: iteration = secret_learned( state=mediator_state, channelidentifiers_to_channels=channelidentifiers_to_channels, pseudo_random_generator=pseudo_random_generator, block_number=block_number, block_hash=block_hash, secret=mediator_state_change.secret, secrethash=mediator_state_change.secrethash, payee_address=mediator_state_change.sender, ) else: iteration = TransitionResult(mediator_state, list()) return iteration
def validate(self, output_type, output_params):
    """ Check that a subscription is defined correctly.

    Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate

    :param output_type: One of DataSift's supported output types, e.g. s3
    :type output_type: str
    :param output_params: The set of parameters required by the specified
        output_type for docs on all available connectors see
        http://dev.datasift.com/docs/push/connectors/
    :type output_params: str
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    payload = {"output_type": output_type, "output_params": output_params}
    return self.request.post('validate', payload)
Check that a subscription is defined correctly. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate :param output_type: One of DataSift's supported output types, e.g. s3 :type output_type: str :param output_params: The set of parameters required by the specified output_type for docs on all available connectors see http://dev.datasift.com/docs/push/connectors/ :type output_params: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
Below is the the instruction that describes the task: ### Input: Check that a subscription is defined correctly. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate :param output_type: One of DataSift's supported output types, e.g. s3 :type output_type: str :param output_params: The set of parameters required by the specified output_type for docs on all available connectors see http://dev.datasift.com/docs/push/connectors/ :type output_params: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` ### Response: def validate(self, output_type, output_params): """ Check that a subscription is defined correctly. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushvalidate :param output_type: One of DataSift's supported output types, e.g. s3 :type output_type: str :param output_params: The set of parameters required by the specified output_type for docs on all available connectors see http://dev.datasift.com/docs/push/connectors/ :type output_params: str :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` """ return self.request.post('validate', dict(output_type=output_type, output_params=output_params))
async def AddCloud(self, cloud, name):
    '''
    cloud : Cloud
    name : str
    Returns -> None
    '''
    # map input types to rpc msg
    params = {'cloud': cloud, 'name': name}
    msg = dict(type='Cloud',
               request='AddCloud',
               version=3,
               params=params)
    return await self.rpc(msg)
cloud : Cloud name : str Returns -> None
Below is the the instruction that describes the task: ### Input: cloud : Cloud name : str Returns -> None ### Response: async def AddCloud(self, cloud, name): ''' cloud : Cloud name : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Cloud', request='AddCloud', version=3, params=_params) _params['cloud'] = cloud _params['name'] = name reply = await self.rpc(msg) return reply
def sortJobs(jobTypes, options):
    """ Return a jobTypes all sorted. """
    longforms = {"med": "median", "ave": "average", "min": "min",
                 "total": "total", "max": "max"}
    field = longforms[options.sortField]
    category = options.sortCategory
    # Pick the sort key according to the requested category; unknown
    # categories fall through and return None, as before.
    if category in ("time", "clock", "wait", "memory"):
        key = lambda tag: getattr(tag, "%s_%s" % (field, category))
    elif category == "alpha":
        key = lambda tag: tag.name
    elif category == "count":
        key = lambda tag: tag.total_number
    else:
        return None
    return sorted(jobTypes, key=key, reverse=options.sortReverse)
Return a jobTypes all sorted.
Below is the the instruction that describes the task: ### Input: Return a jobTypes all sorted. ### Response: def sortJobs(jobTypes, options): """ Return a jobTypes all sorted. """ longforms = {"med": "median", "ave": "average", "min": "min", "total": "total", "max": "max",} sortField = longforms[options.sortField] if (options.sortCategory == "time" or options.sortCategory == "clock" or options.sortCategory == "wait" or options.sortCategory == "memory" ): return sorted( jobTypes, key=lambda tag: getattr(tag, "%s_%s" % (sortField, options.sortCategory)), reverse=options.sortReverse) elif options.sortCategory == "alpha": return sorted( jobTypes, key=lambda tag: tag.name, reverse=options.sortReverse) elif options.sortCategory == "count": return sorted(jobTypes, key=lambda tag: tag.total_number, reverse=options.sortReverse)
def _send_invitation(self, enrollment, event): """Send an invitation mail to an open enrolment""" self.log('Sending enrollment status mail to user') self._send_mail(self.config.invitation_subject, self.config.invitation_mail, enrollment, event)
Send an invitation mail to an open enrolment
Below is the the instruction that describes the task: ### Input: Send an invitation mail to an open enrolment ### Response: def _send_invitation(self, enrollment, event): """Send an invitation mail to an open enrolment""" self.log('Sending enrollment status mail to user') self._send_mail(self.config.invitation_subject, self.config.invitation_mail, enrollment, event)
def sample_block_tridiag(H_diag, H_upper_diag):
    """
    Helper for sampling block-tridiagonal Gaussians.  Kept only for
    speed comparison with the solve-based approach.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)

    # The initial/pairwise potentials are all zero; reuse a single zero
    # block and zero vector (aliasing matches the original behaviour).
    zero_block = zero_11 = zero_22 = np.zeros((D, D))
    zero_vec = zero_h1 = zero_h2 = np.zeros((D,))
    J_lower = np.swapaxes(H_upper_diag, -1, -2)

    return info_sample(
        zero_block, zero_vec, 0,
        zero_11, J_lower, zero_22,
        zero_h1, zero_h2, np.zeros(T - 1),
        H_diag, np.zeros((T, D)), np.zeros(T),
    )
helper function for sampling block tridiag gaussians. this is only for speed comparison with the solve approach.
Below is the the instruction that describes the task: ### Input: helper function for sampling block tridiag gaussians. this is only for speed comparison with the solve approach. ### Response: def sample_block_tridiag(H_diag, H_upper_diag): """ helper function for sampling block tridiag gaussians. this is only for speed comparison with the solve approach. """ T, D, _ = H_diag.shape assert H_diag.ndim == 3 and H_diag.shape[2] == D assert H_upper_diag.shape == (T - 1, D, D) J_init = J_11 = J_22 = np.zeros((D, D)) h_init = h_1 = h_2 = np.zeros((D,)) J_21 = np.swapaxes(H_upper_diag, -1, -2) J_node = H_diag h_node = np.zeros((T,D)) y = info_sample(J_init, h_init, 0, J_11, J_21, J_22, h_1, h_2, np.zeros((T-1)), J_node, h_node, np.zeros(T)) return y
def parse_source_file(source_file): """Parses a source file thing and returns the file name Example: >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06') 'js/src/jit/MIR.h' :arg str source_file: the source file ("file") from a stack frame :returns: the filename or ``None`` if it couldn't determine one """ if not source_file: return None vcsinfo = source_file.split(':') if len(vcsinfo) == 4: # These are repositories or cloud file systems (e.g. hg, git, s3) vcstype, root, vcs_source_file, revision = vcsinfo return vcs_source_file if len(vcsinfo) == 2: # These are directories on someone's Windows computer and vcstype is a # file system (e.g. "c:", "d:", "f:") vcstype, vcs_source_file = vcsinfo return vcs_source_file if source_file.startswith('/'): # These are directories on OSX or Linux return source_file # We have no idea what this is, so return None return None
Parses a source file thing and returns the file name Example: >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06') 'js/src/jit/MIR.h' :arg str source_file: the source file ("file") from a stack frame :returns: the filename or ``None`` if it couldn't determine one
Below is the the instruction that describes the task: ### Input: Parses a source file thing and returns the file name Example: >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06') 'js/src/jit/MIR.h' :arg str source_file: the source file ("file") from a stack frame :returns: the filename or ``None`` if it couldn't determine one ### Response: def parse_source_file(source_file): """Parses a source file thing and returns the file name Example: >>> parse_file('hg:hg.mozilla.org/releases/mozilla-esr52:js/src/jit/MIR.h:755067c14b06') 'js/src/jit/MIR.h' :arg str source_file: the source file ("file") from a stack frame :returns: the filename or ``None`` if it couldn't determine one """ if not source_file: return None vcsinfo = source_file.split(':') if len(vcsinfo) == 4: # These are repositories or cloud file systems (e.g. hg, git, s3) vcstype, root, vcs_source_file, revision = vcsinfo return vcs_source_file if len(vcsinfo) == 2: # These are directories on someone's Windows computer and vcstype is a # file system (e.g. "c:", "d:", "f:") vcstype, vcs_source_file = vcsinfo return vcs_source_file if source_file.startswith('/'): # These are directories on OSX or Linux return source_file # We have no idea what this is, so return None return None
def enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None): """Add the reference nodes for all variants of the given function. :param graph: The target BEL graph to enrich :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna, mirna, and gene. """ if func is None: func = {PROTEIN, RNA, MIRNA, GENE} nodes = list(get_nodes_by_function(graph, func)) for u in nodes: parent = u.get_parent() if parent is None: continue if parent not in graph: graph.add_has_variant(parent, u)
Add the reference nodes for all variants of the given function. :param graph: The target BEL graph to enrich :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna, mirna, and gene.
Below is the the instruction that describes the task: ### Input: Add the reference nodes for all variants of the given function. :param graph: The target BEL graph to enrich :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna, mirna, and gene. ### Response: def enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None): """Add the reference nodes for all variants of the given function. :param graph: The target BEL graph to enrich :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna, mirna, and gene. """ if func is None: func = {PROTEIN, RNA, MIRNA, GENE} nodes = list(get_nodes_by_function(graph, func)) for u in nodes: parent = u.get_parent() if parent is None: continue if parent not in graph: graph.add_has_variant(parent, u)
def get_api_key(self, api_key_id): """Get API key details for key registered in organisation. :param str api_key_id: The ID of the API key to be updated (Required) :returns: API key object :rtype: ApiKey """ api = self._get_api(iam.DeveloperApi) return ApiKey(api.get_api_key(api_key_id))
Get API key details for key registered in organisation. :param str api_key_id: The ID of the API key to be updated (Required) :returns: API key object :rtype: ApiKey
Below is the the instruction that describes the task: ### Input: Get API key details for key registered in organisation. :param str api_key_id: The ID of the API key to be updated (Required) :returns: API key object :rtype: ApiKey ### Response: def get_api_key(self, api_key_id): """Get API key details for key registered in organisation. :param str api_key_id: The ID of the API key to be updated (Required) :returns: API key object :rtype: ApiKey """ api = self._get_api(iam.DeveloperApi) return ApiKey(api.get_api_key(api_key_id))
def address(addr): """ A special argument type that splits a string on ':' and transforms the result into a tuple of host and (integer) port. """ if ':' in addr: # Using rpartition here means we should be able to support # IPv6, but only with a strict syntax host, _sep, port = addr.rpartition(':') # Convert the port to a number try: port = int(port) except ValueError: raise ValueError("invalid port number %r" % port) addr = (host, port) return addr
A special argument type that splits a string on ':' and transforms the result into a tuple of host and (integer) port.
Below is the the instruction that describes the task: ### Input: A special argument type that splits a string on ':' and transforms the result into a tuple of host and (integer) port. ### Response: def address(addr): """ A special argument type that splits a string on ':' and transforms the result into a tuple of host and (integer) port. """ if ':' in addr: # Using rpartition here means we should be able to support # IPv6, but only with a strict syntax host, _sep, port = addr.rpartition(':') # Convert the port to a number try: port = int(port) except ValueError: raise ValueError("invalid port number %r" % port) addr = (host, port) return addr
def SetWindowPos(handle: int, hWndInsertAfter: int, x: int, y: int, width: int, height: int, flags: int) -> bool: """ SetWindowPos from Win32. handle: int, the handle of a native window. hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP. x: int. y: int. width: int. height: int. flags: int, values whose name starts with 'SWP' in class `SWP`. Return bool, True if succeed otherwise False. """ return ctypes.windll.user32.SetWindowPos(ctypes.c_void_p(handle), ctypes.c_void_p(hWndInsertAfter), x, y, width, height, flags)
SetWindowPos from Win32. handle: int, the handle of a native window. hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP. x: int. y: int. width: int. height: int. flags: int, values whose name starts with 'SWP' in class `SWP`. Return bool, True if succeed otherwise False.
Below is the the instruction that describes the task: ### Input: SetWindowPos from Win32. handle: int, the handle of a native window. hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP. x: int. y: int. width: int. height: int. flags: int, values whose name starts with 'SWP' in class `SWP`. Return bool, True if succeed otherwise False. ### Response: def SetWindowPos(handle: int, hWndInsertAfter: int, x: int, y: int, width: int, height: int, flags: int) -> bool: """ SetWindowPos from Win32. handle: int, the handle of a native window. hWndInsertAfter: int, a value whose name starts with 'HWND' in class SWP. x: int. y: int. width: int. height: int. flags: int, values whose name starts with 'SWP' in class `SWP`. Return bool, True if succeed otherwise False. """ return ctypes.windll.user32.SetWindowPos(ctypes.c_void_p(handle), ctypes.c_void_p(hWndInsertAfter), x, y, width, height, flags)
def should_exclude(type_or_instance, exclusion_list): """ Tests whether an object should be simply returned when being wrapped """ if type_or_instance in exclusion_list: # Check class definition return True if type(type_or_instance) in exclusion_list: # Check instance type return True try: if type_or_instance.__class__ in exclusion_list: # Check instance class return True except: pass return False
Tests whether an object should be simply returned when being wrapped
Below is the the instruction that describes the task: ### Input: Tests whether an object should be simply returned when being wrapped ### Response: def should_exclude(type_or_instance, exclusion_list): """ Tests whether an object should be simply returned when being wrapped """ if type_or_instance in exclusion_list: # Check class definition return True if type(type_or_instance) in exclusion_list: # Check instance type return True try: if type_or_instance.__class__ in exclusion_list: # Check instance class return True except: pass return False
def ipv4_private(self, network=False, address_class=None): """ Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4 """ # compute private networks from given class supernet = _IPv4Constants._network_classes[ address_class or self.ipv4_network_class() ] private_networks = [ subnet for subnet in _IPv4Constants._private_networks if subnet.overlaps(supernet) ] # exclude special networks private_networks = self._exclude_ipv4_networks( private_networks, _IPv4Constants._excluded_networks, ) # choose random private network from the list private_network = self.generator.random.choice(private_networks) return self._random_ipv4_address_from_subnet(private_network, network)
Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4
Below is the the instruction that describes the task: ### Input: Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4 ### Response: def ipv4_private(self, network=False, address_class=None): """ Returns a private IPv4. :param network: Network address :param address_class: IPv4 address class (a, b, or c) :returns: Private IPv4 """ # compute private networks from given class supernet = _IPv4Constants._network_classes[ address_class or self.ipv4_network_class() ] private_networks = [ subnet for subnet in _IPv4Constants._private_networks if subnet.overlaps(supernet) ] # exclude special networks private_networks = self._exclude_ipv4_networks( private_networks, _IPv4Constants._excluded_networks, ) # choose random private network from the list private_network = self.generator.random.choice(private_networks) return self._random_ipv4_address_from_subnet(private_network, network)
def spline_base2d(width, height, nr_knots_x = 20.0, nr_knots_y = 20.0, spline_order = 5, marginal_x = None, marginal_y = None): """Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. 
""" if not (nr_knots_x<width and nr_knots_y<height): raise RuntimeError("Too many knots for size of the base") if marginal_x is None: knots_x = augknt(np.linspace(0,width+1,nr_knots_x), spline_order) else: knots_x = knots_from_marginal(marginal_x, nr_knots_x, spline_order) if marginal_y is None: knots_y = augknt(np.linspace(0,height+1, nr_knots_y), spline_order) else: knots_y = knots_from_marginal(marginal_y, nr_knots_y, spline_order) x_eval = np.arange(1,width+1).astype(float) y_eval = np.arange(1,height+1).astype(float) spline_setx = spcol(x_eval, knots_x, spline_order) spline_sety = spcol(y_eval, knots_y, spline_order) nr_coeff = [spline_sety.shape[1], spline_setx.shape[1]] dim_bspline = [nr_coeff[0]*nr_coeff[1], len(x_eval)*len(y_eval)] # construct 2D B-splines nr_basis = 0 bspline = np.zeros(dim_bspline) for IDX1 in range(0,nr_coeff[0]): for IDX2 in range(0, nr_coeff[1]): rand_coeff = np.zeros((nr_coeff[0] , nr_coeff[1])) rand_coeff[IDX1,IDX2] = 1 tmp = np.dot(spline_sety,rand_coeff) bspline[nr_basis,:] = np.dot(tmp,spline_setx.T).reshape((1,-1)) nr_basis = nr_basis+1 return bspline, (knots_x, knots_y)
Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots.
Below is the the instruction that describes the task: ### Input: Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. ### Response: def spline_base2d(width, height, nr_knots_x = 20.0, nr_knots_y = 20.0, spline_order = 5, marginal_x = None, marginal_y = None): """Computes a set of 2D spline basis functions. The basis functions cover the entire space in height*width and can for example be used to create fixation density maps. Input: width: int width of each basis height: int height of each basis nr_knots_x: int of knots in x (width) direction. nr_knots_y: int of knots in y (height) direction. spline_order: int Order of the spline. marginal_x: array, optional Estimate of marginal distribution of the input to be fitted along the x-direction (width). If given, it is used to determine the positioning of knots, each knot will cover the same amount of probability mass. If not given, knots are equally spaced. marginal_y: array, optional Marginal distribution along the y-direction (height). 
If given, it is used to determine the positioning of knots. Each knot will cover the same amount of probability mass. Output: basis: Matrix Matrix of size n*(width*height) that contains in each row one vectorized basis. knots: Tuple (x,y) are knot arrays that show the placement of knots. """ if not (nr_knots_x<width and nr_knots_y<height): raise RuntimeError("Too many knots for size of the base") if marginal_x is None: knots_x = augknt(np.linspace(0,width+1,nr_knots_x), spline_order) else: knots_x = knots_from_marginal(marginal_x, nr_knots_x, spline_order) if marginal_y is None: knots_y = augknt(np.linspace(0,height+1, nr_knots_y), spline_order) else: knots_y = knots_from_marginal(marginal_y, nr_knots_y, spline_order) x_eval = np.arange(1,width+1).astype(float) y_eval = np.arange(1,height+1).astype(float) spline_setx = spcol(x_eval, knots_x, spline_order) spline_sety = spcol(y_eval, knots_y, spline_order) nr_coeff = [spline_sety.shape[1], spline_setx.shape[1]] dim_bspline = [nr_coeff[0]*nr_coeff[1], len(x_eval)*len(y_eval)] # construct 2D B-splines nr_basis = 0 bspline = np.zeros(dim_bspline) for IDX1 in range(0,nr_coeff[0]): for IDX2 in range(0, nr_coeff[1]): rand_coeff = np.zeros((nr_coeff[0] , nr_coeff[1])) rand_coeff[IDX1,IDX2] = 1 tmp = np.dot(spline_sety,rand_coeff) bspline[nr_basis,:] = np.dot(tmp,spline_setx.T).reshape((1,-1)) nr_basis = nr_basis+1 return bspline, (knots_x, knots_y)
def _create_properties(self): """Populate a frame with a list of all editable properties""" self._frame = f = ttk.Labelframe(self._sframe.innerframe, text=_('Widget properties')) f.grid(sticky='nswe') label_tpl = "{0}:" row = 0 col = 0 groups = ( ('00', _('Required'), properties.WIDGET_REQUIRED_OPTIONS, properties.REQUIRED_OPTIONS), ('01', _('Standard'), properties.WIDGET_STANDARD_OPTIONS, properties.TK_WIDGET_OPTIONS), ('02', _('Specific'), properties.WIDGET_SPECIFIC_OPTIONS, properties.TK_WIDGET_OPTIONS), ('03', _('Custom'), properties.WIDGET_CUSTOM_OPTIONS, properties.CUSTOM_OPTIONS), ) for gcode, gname, plist, propdescr in groups: padding = '0 0 0 5' if row == 0 else '0 5 0 5' label = ttk.Label(self._frame, text=gname, font='TkDefaultFont 10 bold', padding=padding, foreground='#000059') label.grid(row=row, column=0, sticky='we', columnspan=2) row += 1 for name in plist: kwdata = propdescr[name] labeltext = label_tpl.format(name) label = ttk.Label(self._frame, text=labeltext, anchor=tk.W) label.grid(row=row, column=col, sticky=tk.EW, pady=2) widget = self._create_editor(self._frame, name, kwdata) widget.grid(row=row, column=col+1, sticky=tk.EW, pady=2) row += 1 self._propbag[gcode+name] = (label, widget) logger.debug('Created property: {0}-{1}'.format(gname,name))
Populate a frame with a list of all editable properties
Below is the the instruction that describes the task: ### Input: Populate a frame with a list of all editable properties ### Response: def _create_properties(self): """Populate a frame with a list of all editable properties""" self._frame = f = ttk.Labelframe(self._sframe.innerframe, text=_('Widget properties')) f.grid(sticky='nswe') label_tpl = "{0}:" row = 0 col = 0 groups = ( ('00', _('Required'), properties.WIDGET_REQUIRED_OPTIONS, properties.REQUIRED_OPTIONS), ('01', _('Standard'), properties.WIDGET_STANDARD_OPTIONS, properties.TK_WIDGET_OPTIONS), ('02', _('Specific'), properties.WIDGET_SPECIFIC_OPTIONS, properties.TK_WIDGET_OPTIONS), ('03', _('Custom'), properties.WIDGET_CUSTOM_OPTIONS, properties.CUSTOM_OPTIONS), ) for gcode, gname, plist, propdescr in groups: padding = '0 0 0 5' if row == 0 else '0 5 0 5' label = ttk.Label(self._frame, text=gname, font='TkDefaultFont 10 bold', padding=padding, foreground='#000059') label.grid(row=row, column=0, sticky='we', columnspan=2) row += 1 for name in plist: kwdata = propdescr[name] labeltext = label_tpl.format(name) label = ttk.Label(self._frame, text=labeltext, anchor=tk.W) label.grid(row=row, column=col, sticky=tk.EW, pady=2) widget = self._create_editor(self._frame, name, kwdata) widget.grid(row=row, column=col+1, sticky=tk.EW, pady=2) row += 1 self._propbag[gcode+name] = (label, widget) logger.debug('Created property: {0}-{1}'.format(gname,name))
def reset( self ): """ Resets the values to the current application information. """ self.setValue('colorSet', XPaletteColorSet()) self.setValue('font', QApplication.font()) self.setValue('fontSize', QApplication.font().pointSize())
Resets the values to the current application information.
Below is the the instruction that describes the task: ### Input: Resets the values to the current application information. ### Response: def reset( self ): """ Resets the values to the current application information. """ self.setValue('colorSet', XPaletteColorSet()) self.setValue('font', QApplication.font()) self.setValue('fontSize', QApplication.font().pointSize())
def _set_log_view(self, session): """Sets the underlying log view to match current view""" if self._log_view == FEDERATED: try: session.use_federated_log_view() except AttributeError: pass else: try: session.use_isolated_log_view() except AttributeError: pass
Sets the underlying log view to match current view
Below is the the instruction that describes the task: ### Input: Sets the underlying log view to match current view ### Response: def _set_log_view(self, session): """Sets the underlying log view to match current view""" if self._log_view == FEDERATED: try: session.use_federated_log_view() except AttributeError: pass else: try: session.use_isolated_log_view() except AttributeError: pass
def fix_opcode_names(opmap): """ Python stupidly named some OPCODES with a + which prevents using opcode name directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we can then use opcode_23.SLICE_3. Later Python's fix this. """ return dict([(k.replace('+', '_'), v) for (k, v) in opmap.items()])
Python stupidly named some OPCODES with a + which prevents using opcode name directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we can then use opcode_23.SLICE_3. Later Python's fix this.
Below is the the instruction that describes the task: ### Input: Python stupidly named some OPCODES with a + which prevents using opcode name directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we can then use opcode_23.SLICE_3. Later Python's fix this. ### Response: def fix_opcode_names(opmap): """ Python stupidly named some OPCODES with a + which prevents using opcode name directly as an attribute, e.g. SLICE+3. So we turn that into SLICE_3 so we can then use opcode_23.SLICE_3. Later Python's fix this. """ return dict([(k.replace('+', '_'), v) for (k, v) in opmap.items()])
def gather_readme(self): """ Return the readme file. """ if not os.path.exists(self.paths["readme"]): return "" return utils.file_to_string(self.paths["readme"])
Return the readme file.
Below is the the instruction that describes the task: ### Input: Return the readme file. ### Response: def gather_readme(self): """ Return the readme file. """ if not os.path.exists(self.paths["readme"]): return "" return utils.file_to_string(self.paths["readme"])
def is_number(obj): """Check if obj is number.""" return isinstance(obj, (int, float, np.int_, np.float_))
Check if obj is number.
Below is the the instruction that describes the task: ### Input: Check if obj is number. ### Response: def is_number(obj): """Check if obj is number.""" return isinstance(obj, (int, float, np.int_, np.float_))
def get_tau_at_quantile(mean, stddev, quantile): """ Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure """ tau_model = {} for imt in mean: tau_model[imt] = {} for key in mean[imt]: if quantile is None: tau_model[imt][key] = mean[imt][key] else: tau_model[imt][key] = _at_percentile(mean[imt][key], stddev[imt][key], quantile) return tau_model
Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure
Below is the the instruction that describes the task: ### Input: Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure ### Response: def get_tau_at_quantile(mean, stddev, quantile): """ Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure """ tau_model = {} for imt in mean: tau_model[imt] = {} for key in mean[imt]: if quantile is None: tau_model[imt][key] = mean[imt][key] else: tau_model[imt][key] = _at_percentile(mean[imt][key], stddev[imt][key], quantile) return tau_model
def close_connection(self): "Request a connection close from the SMTP session handling instance." if self._is_connected: self._is_connected = False self._command_parser.close_when_done()
Request a connection close from the SMTP session handling instance.
Below is the the instruction that describes the task: ### Input: Request a connection close from the SMTP session handling instance. ### Response: def close_connection(self): "Request a connection close from the SMTP session handling instance." if self._is_connected: self._is_connected = False self._command_parser.close_when_done()
def cancel(self, block=True): """Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned """ if self._consumer: self._consumer.cancel(block=block)
Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned
Below is the the instruction that describes the task: ### Input: Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned ### Response: def cancel(self, block=True): """Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned """ if self._consumer: self._consumer.cancel(block=block)
def script_to_hex(script): """ Parse the string representation of a script and return the hex version. Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG" """ hex_script = '' parts = script.split(' ') for part in parts: if part[0:3] == 'OP_': try: hex_script += '%0.2x' % eval(part) except: raise Exception('Invalid opcode: %s' % part) elif isinstance(part, (int)): hex_script += '%0.2x' % part elif is_hex(part): hex_script += '%0.2x' % count_bytes(part) + part else: raise Exception('Invalid script - only opcodes and hex characters allowed.') return hex_script
Parse the string representation of a script and return the hex version. Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG"
Below is the the instruction that describes the task: ### Input: Parse the string representation of a script and return the hex version. Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG" ### Response: def script_to_hex(script): """ Parse the string representation of a script and return the hex version. Example: "OP_DUP OP_HASH160 c629...a6db OP_EQUALVERIFY OP_CHECKSIG" """ hex_script = '' parts = script.split(' ') for part in parts: if part[0:3] == 'OP_': try: hex_script += '%0.2x' % eval(part) except: raise Exception('Invalid opcode: %s' % part) elif isinstance(part, (int)): hex_script += '%0.2x' % part elif is_hex(part): hex_script += '%0.2x' % count_bytes(part) + part else: raise Exception('Invalid script - only opcodes and hex characters allowed.') return hex_script
def flip_whole(self, tour): """ Test flipping all contigs at the same time to see if score improves. """ score, = self.evaluate_tour_Q(tour) self.signs = -self.signs score_flipped, = self.evaluate_tour_Q(tour) if score_flipped > score: tag = ACCEPT else: self.signs = -self.signs tag = REJECT self.flip_log("FLIPWHOLE", score, score_flipped, tag) return tag
Test flipping all contigs at the same time to see if score improves.
Below is the the instruction that describes the task: ### Input: Test flipping all contigs at the same time to see if score improves. ### Response: def flip_whole(self, tour): """ Test flipping all contigs at the same time to see if score improves. """ score, = self.evaluate_tour_Q(tour) self.signs = -self.signs score_flipped, = self.evaluate_tour_Q(tour) if score_flipped > score: tag = ACCEPT else: self.signs = -self.signs tag = REJECT self.flip_log("FLIPWHOLE", score, score_flipped, tag) return tag
def _init_lsr(n_items, alpha, initial_params): """Initialize the LSR Markov chain and the weights.""" if initial_params is None: weights = np.ones(n_items) else: weights = exp_transform(initial_params) chain = alpha * np.ones((n_items, n_items), dtype=float) return weights, chain
Initialize the LSR Markov chain and the weights.
Below is the the instruction that describes the task: ### Input: Initialize the LSR Markov chain and the weights. ### Response: def _init_lsr(n_items, alpha, initial_params): """Initialize the LSR Markov chain and the weights.""" if initial_params is None: weights = np.ones(n_items) else: weights = exp_transform(initial_params) chain = alpha * np.ones((n_items, n_items), dtype=float) return weights, chain
def apply(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]): """ Transform value by script, returning all results as list. """ return all(script, value, vars, url, opener, library_paths)
Transform value by script, returning all results as list.
Below is the the instruction that describes the task: ### Input: Transform value by script, returning all results as list. ### Response: def apply(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]): """ Transform value by script, returning all results as list. """ return all(script, value, vars, url, opener, library_paths)
def amplitude(self): '返回DataStruct.price的百分比变化' res = self.price.groupby( level=1 ).apply(lambda x: (x.max() - x.min()) / x.min()) res.name = 'amplitude' return res
返回DataStruct.price的百分比变化
Below is the the instruction that describes the task: ### Input: 返回DataStruct.price的百分比变化 ### Response: def amplitude(self): '返回DataStruct.price的百分比变化' res = self.price.groupby( level=1 ).apply(lambda x: (x.max() - x.min()) / x.min()) res.name = 'amplitude' return res
def prepare_iter_request( url: Union[methods, str], data: MutableMapping, *, iterkey: Optional[str] = None, itermode: Optional[str] = None, limit: int = 200, itervalue: Optional[Union[str, int]] = None, ) -> Tuple[MutableMapping, str, str]: """ Prepare outgoing iteration request Args: url: :class:`slack.methods` item or string of url data: Outgoing data limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode) Returns: :py:class:`tuple` (data, iterkey, itermode) """ itermode, iterkey = find_iteration(url, itermode, iterkey) if itermode == "cursor": data["limit"] = limit if itervalue: data["cursor"] = itervalue elif itermode == "page": data["count"] = limit if itervalue: data["page"] = itervalue elif itermode == "timeline": data["count"] = limit if itervalue: data["latest"] = itervalue return data, iterkey, itermode
Prepare outgoing iteration request Args: url: :class:`slack.methods` item or string of url data: Outgoing data limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode) Returns: :py:class:`tuple` (data, iterkey, itermode)
Below is the the instruction that describes the task: ### Input: Prepare outgoing iteration request Args: url: :class:`slack.methods` item or string of url data: Outgoing data limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode) Returns: :py:class:`tuple` (data, iterkey, itermode) ### Response: def prepare_iter_request( url: Union[methods, str], data: MutableMapping, *, iterkey: Optional[str] = None, itermode: Optional[str] = None, limit: int = 200, itervalue: Optional[Union[str, int]] = None, ) -> Tuple[MutableMapping, str, str]: """ Prepare outgoing iteration request Args: url: :class:`slack.methods` item or string of url data: Outgoing data limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) itervalue: Value for current iteration (cursor hash, page or timestamp depending on the itermode) Returns: :py:class:`tuple` (data, iterkey, itermode) """ itermode, iterkey = find_iteration(url, itermode, iterkey) if itermode == "cursor": data["limit"] = limit if itervalue: data["cursor"] = itervalue elif itermode == "page": data["count"] = limit if itervalue: data["page"] = itervalue elif itermode == "timeline": data["count"] = limit if itervalue: data["latest"] = itervalue return data, iterkey, itermode
def get_bert_model(model_name=None, dataset_name=None, vocab=None, pretrained=True, ctx=mx.cpu(), use_pooler=True, use_decoder=True, use_classifier=True, output_attention=False, output_all_encodings=False, root=os.path.join(get_home_dir(), 'models'), **kwargs): """Any BERT pretrained model. Parameters ---------- model_name : str or None, default None Options include 'bert_24_1024_16' and 'bert_12_768_12'. dataset_name : str or None, default None Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased' for both bert_24_1024_16 and bert_12_768_12. 'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased' for bert_12_768_12 only. vocab : gluonnlp.vocab.BERTVocab or None, default None Vocabulary for the dataset. Must be provided if dataset is not specified. pretrained : bool, default True Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. use_pooler : bool, default True Whether to include the pooler which converts the encoded sequence tensor of shape (batch_size, seq_length, units) to a tensor of shape (batch_size, units) for for segment level classification task. use_decoder : bool, default True Whether to include the decoder for masked language model prediction. use_classifier : bool, default True Whether to include the classifier for next sentence classification. output_attention : bool, default False Whether to include attention weights of each encoding cell to the output. output_all_encodings : bool, default False Whether to output encodings of all encoder cells. 
Returns ------- BERTModel, gluonnlp.vocab.BERTVocab """ predefined_args = bert_hparams[model_name] mutable_args = ['use_residual', 'dropout', 'embed_dropout', 'word_embed'] mutable_args = frozenset(mutable_args) assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \ 'Cannot override predefined model settings.' predefined_args.update(kwargs) # encoder encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=output_attention, output_all_encodings=output_all_encodings, use_residual=predefined_args['use_residual']) # bert_vocab from ..vocab import BERTVocab if dataset_name in ['wiki_cn', 'wiki_multilingual']: warnings.warn('wiki_cn/wiki_multilingual will be deprecated.' ' Please use wiki_cn_cased/wiki_multilingual_uncased instead.') bert_vocab = _load_vocab(dataset_name, vocab, root, cls=BERTVocab) # BERT net = BERTModel(encoder, len(bert_vocab), token_type_vocab_size=predefined_args['token_type_vocab_size'], units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=use_pooler, use_decoder=use_decoder, use_classifier=use_classifier) if pretrained: ignore_extra = not (use_pooler and use_decoder and use_classifier) _load_pretrained_params(net, model_name, dataset_name, root, ctx, ignore_extra=ignore_extra) return net, bert_vocab
Any BERT pretrained model. Parameters ---------- model_name : str or None, default None Options include 'bert_24_1024_16' and 'bert_12_768_12'. dataset_name : str or None, default None Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased' for both bert_24_1024_16 and bert_12_768_12. 'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased' for bert_12_768_12 only. vocab : gluonnlp.vocab.BERTVocab or None, default None Vocabulary for the dataset. Must be provided if dataset is not specified. pretrained : bool, default True Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. use_pooler : bool, default True Whether to include the pooler which converts the encoded sequence tensor of shape (batch_size, seq_length, units) to a tensor of shape (batch_size, units) for for segment level classification task. use_decoder : bool, default True Whether to include the decoder for masked language model prediction. use_classifier : bool, default True Whether to include the classifier for next sentence classification. output_attention : bool, default False Whether to include attention weights of each encoding cell to the output. output_all_encodings : bool, default False Whether to output encodings of all encoder cells. Returns ------- BERTModel, gluonnlp.vocab.BERTVocab
Below is the the instruction that describes the task: ### Input: Any BERT pretrained model. Parameters ---------- model_name : str or None, default None Options include 'bert_24_1024_16' and 'bert_12_768_12'. dataset_name : str or None, default None Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased' for both bert_24_1024_16 and bert_12_768_12. 'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased' for bert_12_768_12 only. vocab : gluonnlp.vocab.BERTVocab or None, default None Vocabulary for the dataset. Must be provided if dataset is not specified. pretrained : bool, default True Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. use_pooler : bool, default True Whether to include the pooler which converts the encoded sequence tensor of shape (batch_size, seq_length, units) to a tensor of shape (batch_size, units) for for segment level classification task. use_decoder : bool, default True Whether to include the decoder for masked language model prediction. use_classifier : bool, default True Whether to include the classifier for next sentence classification. output_attention : bool, default False Whether to include attention weights of each encoding cell to the output. output_all_encodings : bool, default False Whether to output encodings of all encoder cells. Returns ------- BERTModel, gluonnlp.vocab.BERTVocab ### Response: def get_bert_model(model_name=None, dataset_name=None, vocab=None, pretrained=True, ctx=mx.cpu(), use_pooler=True, use_decoder=True, use_classifier=True, output_attention=False, output_all_encodings=False, root=os.path.join(get_home_dir(), 'models'), **kwargs): """Any BERT pretrained model. Parameters ---------- model_name : str or None, default None Options include 'bert_24_1024_16' and 'bert_12_768_12'. 
dataset_name : str or None, default None Options include 'book_corpus_wiki_en_cased', 'book_corpus_wiki_en_uncased' for both bert_24_1024_16 and bert_12_768_12. 'wiki_cn_cased', 'wiki_multilingual_uncased' and 'wiki_multilingual_cased' for bert_12_768_12 only. vocab : gluonnlp.vocab.BERTVocab or None, default None Vocabulary for the dataset. Must be provided if dataset is not specified. pretrained : bool, default True Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. use_pooler : bool, default True Whether to include the pooler which converts the encoded sequence tensor of shape (batch_size, seq_length, units) to a tensor of shape (batch_size, units) for for segment level classification task. use_decoder : bool, default True Whether to include the decoder for masked language model prediction. use_classifier : bool, default True Whether to include the classifier for next sentence classification. output_attention : bool, default False Whether to include attention weights of each encoding cell to the output. output_all_encodings : bool, default False Whether to output encodings of all encoder cells. Returns ------- BERTModel, gluonnlp.vocab.BERTVocab """ predefined_args = bert_hparams[model_name] mutable_args = ['use_residual', 'dropout', 'embed_dropout', 'word_embed'] mutable_args = frozenset(mutable_args) assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \ 'Cannot override predefined model settings.' 
predefined_args.update(kwargs) # encoder encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=output_attention, output_all_encodings=output_all_encodings, use_residual=predefined_args['use_residual']) # bert_vocab from ..vocab import BERTVocab if dataset_name in ['wiki_cn', 'wiki_multilingual']: warnings.warn('wiki_cn/wiki_multilingual will be deprecated.' ' Please use wiki_cn_cased/wiki_multilingual_uncased instead.') bert_vocab = _load_vocab(dataset_name, vocab, root, cls=BERTVocab) # BERT net = BERTModel(encoder, len(bert_vocab), token_type_vocab_size=predefined_args['token_type_vocab_size'], units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=use_pooler, use_decoder=use_decoder, use_classifier=use_classifier) if pretrained: ignore_extra = not (use_pooler and use_decoder and use_classifier) _load_pretrained_params(net, model_name, dataset_name, root, ctx, ignore_extra=ignore_extra) return net, bert_vocab
def flush(self): """Flush(apply) all changed to datastore.""" self.puts.flush() self.deletes.flush() self.ndb_puts.flush() self.ndb_deletes.flush()
Flush(apply) all changed to datastore.
Below is the the instruction that describes the task: ### Input: Flush(apply) all changed to datastore. ### Response: def flush(self): """Flush(apply) all changed to datastore.""" self.puts.flush() self.deletes.flush() self.ndb_puts.flush() self.ndb_deletes.flush()
def valid_input(val): """ Ensure the input the user gave is of a valid format """ # looks for 3 nums followed by a dot 3 times and then ending with # 3 nums, can be proceeded by any number of spaces ip_value = re.compile(r'(\d{1,3}\.){3}\d{1,3}$') # looks for only numbers and commas (because priorities can have commas # between them), can be proceeded by any number of spaces all_num = re.compile(r'(\d,?\ *)+$') sections_comments = re.compile(r""" \ *\#.* # comments (any number of whitespace, then # # followed by anything) | \[[\w-]+\]$ # section headers (any combination of chars, nums, # underscores, and dashes between brackets) """, re.VERBOSE) # can't can be a comment on option side and value side can't have # [, ], {, or } otherwise it is turned over to literal_eval for # checkout options_values = re.compile(r'[^# ]+\ *=[^[\]{}]*$') line_num = 0 warning_str = '' error_str = '' trimmed_val = [] for entry in val.split('\n'): line_num += 1 # get rid of any extraneous commas at the end of a dict and remove # extra whitespace from input trimmed_val.append(re.sub(r',\ *}', '}', entry).strip()) # empty line if entry.strip() == '': continue # look at regular (non dictionary or list) option-value pairs if options_values.match(entry): value = entry.split('=', 1)[1] # deal with potentially more equals signs for val in value.split('='): val = val.strip() # empty val means malformed equals signs if val == '': error_str += '-You have a misplaced equals sign on' \ ' line ' + str(line_num) + '\n' # starts with a num; look for bad ip input or warn user # about having extraneous characters in number input if re.match('\ *\d', val): # bad ip syntax if val.find('.') >= 0 and not ip_value.match(val): error_str += '-You have an incorrectly' \ ' formatted ip address (bad syntax) at' \ ' line ' + str(line_num) + '\n' # possibly malformed numbers elif val.find('.') < 0 and not all_num.match(val): warning_str += '-Line starting with a number has' \ ' characters mixed in at line ' + \ 
str(line_num) + '\n' # bad ip values elif val.find('.') >= 0: for num in val.strip().split('.'): num = int(num) if num > 255 or num < 0: error_str += '-You have an incorrectly' \ ' formatted ip address (values' \ ' exceeding 255 or below 0) at' \ ' line ' + str(line_num) + '\n' # ensure no lines end with a comma (most likely extraneous # commas from groups or priorities) if re.search(',$', val): error_str += '-You have an incorrect comma at the' \ ' end of line ' + str(line_num) + '\n' # see if input is a header or comment, otherwise try to # literal_eval it to ensure correct structure elif not sections_comments.match(entry): lit_val = '' try: opt_val = entry.split('=', 1) if opt_val[0].strip() == '': error_str += '-You have nothing preceeding an' \ ' equals sign at line ' + str(line_num) + '\n' else: lit_val = opt_val[1].strip() except IndexError: lit_val = '' error_str += '-You have an incorrectly formatted' \ ' section header at line ' + str(line_num) + '\n' if lit_val: try: ast.literal_eval(lit_val) except SyntaxError: error_str += '-You have an incorrectly formatted' \ ' list/dictionary at line ' + str(line_num) + \ '\n' if error_str: npyscreen.notify_confirm('You have the following error(s) and' " can't proceed until they are fixed:" + '\n' + '-'*50 + '\n' + error_str, title='Error in input') return (False, '') elif warning_str: res = npyscreen.notify_yes_no('You have may have some error(s)' ' that you want to check before' ' proceeding:' + '\n' + '-'*50 + '\n' + warning_str + '\n' + '-'*50 + '\n' + 'Do you want to continue?', title='Double check') return (res, '\n'.join(trimmed_val)) return (True, '\n'.join(trimmed_val))
Ensure the input the user gave is of a valid format
Below is the the instruction that describes the task: ### Input: Ensure the input the user gave is of a valid format ### Response: def valid_input(val): """ Ensure the input the user gave is of a valid format """ # looks for 3 nums followed by a dot 3 times and then ending with # 3 nums, can be proceeded by any number of spaces ip_value = re.compile(r'(\d{1,3}\.){3}\d{1,3}$') # looks for only numbers and commas (because priorities can have commas # between them), can be proceeded by any number of spaces all_num = re.compile(r'(\d,?\ *)+$') sections_comments = re.compile(r""" \ *\#.* # comments (any number of whitespace, then # # followed by anything) | \[[\w-]+\]$ # section headers (any combination of chars, nums, # underscores, and dashes between brackets) """, re.VERBOSE) # can't can be a comment on option side and value side can't have # [, ], {, or } otherwise it is turned over to literal_eval for # checkout options_values = re.compile(r'[^# ]+\ *=[^[\]{}]*$') line_num = 0 warning_str = '' error_str = '' trimmed_val = [] for entry in val.split('\n'): line_num += 1 # get rid of any extraneous commas at the end of a dict and remove # extra whitespace from input trimmed_val.append(re.sub(r',\ *}', '}', entry).strip()) # empty line if entry.strip() == '': continue # look at regular (non dictionary or list) option-value pairs if options_values.match(entry): value = entry.split('=', 1)[1] # deal with potentially more equals signs for val in value.split('='): val = val.strip() # empty val means malformed equals signs if val == '': error_str += '-You have a misplaced equals sign on' \ ' line ' + str(line_num) + '\n' # starts with a num; look for bad ip input or warn user # about having extraneous characters in number input if re.match('\ *\d', val): # bad ip syntax if val.find('.') >= 0 and not ip_value.match(val): error_str += '-You have an incorrectly' \ ' formatted ip address (bad syntax) at' \ ' line ' + str(line_num) + '\n' # possibly malformed numbers elif 
val.find('.') < 0 and not all_num.match(val): warning_str += '-Line starting with a number has' \ ' characters mixed in at line ' + \ str(line_num) + '\n' # bad ip values elif val.find('.') >= 0: for num in val.strip().split('.'): num = int(num) if num > 255 or num < 0: error_str += '-You have an incorrectly' \ ' formatted ip address (values' \ ' exceeding 255 or below 0) at' \ ' line ' + str(line_num) + '\n' # ensure no lines end with a comma (most likely extraneous # commas from groups or priorities) if re.search(',$', val): error_str += '-You have an incorrect comma at the' \ ' end of line ' + str(line_num) + '\n' # see if input is a header or comment, otherwise try to # literal_eval it to ensure correct structure elif not sections_comments.match(entry): lit_val = '' try: opt_val = entry.split('=', 1) if opt_val[0].strip() == '': error_str += '-You have nothing preceeding an' \ ' equals sign at line ' + str(line_num) + '\n' else: lit_val = opt_val[1].strip() except IndexError: lit_val = '' error_str += '-You have an incorrectly formatted' \ ' section header at line ' + str(line_num) + '\n' if lit_val: try: ast.literal_eval(lit_val) except SyntaxError: error_str += '-You have an incorrectly formatted' \ ' list/dictionary at line ' + str(line_num) + \ '\n' if error_str: npyscreen.notify_confirm('You have the following error(s) and' " can't proceed until they are fixed:" + '\n' + '-'*50 + '\n' + error_str, title='Error in input') return (False, '') elif warning_str: res = npyscreen.notify_yes_no('You have may have some error(s)' ' that you want to check before' ' proceeding:' + '\n' + '-'*50 + '\n' + warning_str + '\n' + '-'*50 + '\n' + 'Do you want to continue?', title='Double check') return (res, '\n'.join(trimmed_val)) return (True, '\n'.join(trimmed_val))
def process_pub_date(year, mon, day): """Create pub_date from what Pubmed provides in Journal PubDate entry """ pub_date = None if year and re.match("[a-zA-Z]+", mon): pub_date = datetime.datetime.strptime( f"{year}-{mon}-{day}", "%Y-%b-%d" ).strftime("%Y-%m-%d") elif year: pub_date = f"{year}-{mon}-{day}" return pub_date
Create pub_date from what Pubmed provides in Journal PubDate entry
Below is the the instruction that describes the task: ### Input: Create pub_date from what Pubmed provides in Journal PubDate entry ### Response: def process_pub_date(year, mon, day): """Create pub_date from what Pubmed provides in Journal PubDate entry """ pub_date = None if year and re.match("[a-zA-Z]+", mon): pub_date = datetime.datetime.strptime( f"{year}-{mon}-{day}", "%Y-%b-%d" ).strftime("%Y-%m-%d") elif year: pub_date = f"{year}-{mon}-{day}" return pub_date
def update_affinity_group(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Update an affinity group's properties CLI Example: .. code-block:: bash salt-cloud -f update_affinity_group my-azure name=my_group label=my_group ''' if call != 'function': raise SaltCloudSystemExit( 'The update_affinity_group function must be called with -f or --function.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'label' not in kwargs: raise SaltCloudSystemExit('A label must be specified as "label"') conn.update_affinity_group( affinity_group_name=kwargs['name'], label=kwargs['label'], description=kwargs.get('description', None), ) return show_affinity_group(kwargs={'name': kwargs['name']}, call='function')
.. versionadded:: 2015.8.0 Update an affinity group's properties CLI Example: .. code-block:: bash salt-cloud -f update_affinity_group my-azure name=my_group label=my_group
Below is the the instruction that describes the task: ### Input: .. versionadded:: 2015.8.0 Update an affinity group's properties CLI Example: .. code-block:: bash salt-cloud -f update_affinity_group my-azure name=my_group label=my_group ### Response: def update_affinity_group(kwargs=None, conn=None, call=None): ''' .. versionadded:: 2015.8.0 Update an affinity group's properties CLI Example: .. code-block:: bash salt-cloud -f update_affinity_group my-azure name=my_group label=my_group ''' if call != 'function': raise SaltCloudSystemExit( 'The update_affinity_group function must be called with -f or --function.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A name must be specified as "name"') if 'label' not in kwargs: raise SaltCloudSystemExit('A label must be specified as "label"') conn.update_affinity_group( affinity_group_name=kwargs['name'], label=kwargs['label'], description=kwargs.get('description', None), ) return show_affinity_group(kwargs={'name': kwargs['name']}, call='function')
def _unique_hierarchical_string(self): """ Returns: str: a representation of time such as:: '2014/2/23/15/26/8/9877978' The last part (microsecond) is needed to avoid duplicates in rapid-fire transactions e.g. ``> 1`` edition. """ t = datetime.now() return '%s/%s/%s/%s/%s/%s/%s' % (t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
Returns: str: a representation of time such as:: '2014/2/23/15/26/8/9877978' The last part (microsecond) is needed to avoid duplicates in rapid-fire transactions e.g. ``> 1`` edition.
Below is the the instruction that describes the task: ### Input: Returns: str: a representation of time such as:: '2014/2/23/15/26/8/9877978' The last part (microsecond) is needed to avoid duplicates in rapid-fire transactions e.g. ``> 1`` edition. ### Response: def _unique_hierarchical_string(self): """ Returns: str: a representation of time such as:: '2014/2/23/15/26/8/9877978' The last part (microsecond) is needed to avoid duplicates in rapid-fire transactions e.g. ``> 1`` edition. """ t = datetime.now() return '%s/%s/%s/%s/%s/%s/%s' % (t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
def events(self): """ Return all events """ all_events = [{ensure_unicode(key): value for key, value in iteritems(ev)} for ev in self._events] for ev in all_events: to_decode = [] for key, value in iteritems(ev): if isinstance(value, binary_type) and key != 'host': to_decode.append(key) for key in to_decode: ev[key] = ensure_unicode(ev[key]) if ev.get('tags'): ev['tags'] = normalize_tags(ev['tags']) return all_events
Return all events
Below is the the instruction that describes the task: ### Input: Return all events ### Response: def events(self): """ Return all events """ all_events = [{ensure_unicode(key): value for key, value in iteritems(ev)} for ev in self._events] for ev in all_events: to_decode = [] for key, value in iteritems(ev): if isinstance(value, binary_type) and key != 'host': to_decode.append(key) for key in to_decode: ev[key] = ensure_unicode(ev[key]) if ev.get('tags'): ev['tags'] = normalize_tags(ev['tags']) return all_events
def _get_prev_next(self, urn): """ Provisional route for GetPrevNext request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetPrevNext response """ urn = URN(urn) subreference = None textId = urn.upTo(URN.NO_PASSAGE) if urn.reference is not None: subreference = str(urn.reference) previous, nextious = self.resolver.getSiblings(textId=textId, subreference=subreference) r = render_template( "cts/GetPrevNext.xml", prev_urn=previous, next_urn=nextious, urn=textId, request_urn=str(urn) ) return r, 200, {"content-type": "application/xml"}
Provisional route for GetPrevNext request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetPrevNext response
Below is the the instruction that describes the task: ### Input: Provisional route for GetPrevNext request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetPrevNext response ### Response: def _get_prev_next(self, urn): """ Provisional route for GetPrevNext request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetPrevNext response """ urn = URN(urn) subreference = None textId = urn.upTo(URN.NO_PASSAGE) if urn.reference is not None: subreference = str(urn.reference) previous, nextious = self.resolver.getSiblings(textId=textId, subreference=subreference) r = render_template( "cts/GetPrevNext.xml", prev_urn=previous, next_urn=nextious, urn=textId, request_urn=str(urn) ) return r, 200, {"content-type": "application/xml"}
def __gen_primary_text_file(self): """ generate the PAULA file that contains the primary text of the document graph. (PAULA documents can have more than one primary text, but discoursegraphs only works with documents that are based on exactly one primary text.) Example ------- <?xml version="1.0" standalone="no"?> <!DOCTYPE paula SYSTEM "paula_text.dtd"> <paula version="1.1"> <header paula_id="maz-1423.text" type="text"/> <body>Zum Angewöhnen ...</body> </paula> """ paula_id = '{0}.{1}.text'.format(self.corpus_name, self.name) E, tree = gen_paula_etree(paula_id) tree.append(E.body(get_text(self.dg))) self.files[paula_id] = tree self.file2dtd[paula_id] = PaulaDTDs.text return paula_id
generate the PAULA file that contains the primary text of the document graph. (PAULA documents can have more than one primary text, but discoursegraphs only works with documents that are based on exactly one primary text.) Example ------- <?xml version="1.0" standalone="no"?> <!DOCTYPE paula SYSTEM "paula_text.dtd"> <paula version="1.1"> <header paula_id="maz-1423.text" type="text"/> <body>Zum Angewöhnen ...</body> </paula>
Below is the the instruction that describes the task: ### Input: generate the PAULA file that contains the primary text of the document graph. (PAULA documents can have more than one primary text, but discoursegraphs only works with documents that are based on exactly one primary text.) Example ------- <?xml version="1.0" standalone="no"?> <!DOCTYPE paula SYSTEM "paula_text.dtd"> <paula version="1.1"> <header paula_id="maz-1423.text" type="text"/> <body>Zum Angewöhnen ...</body> </paula> ### Response: def __gen_primary_text_file(self): """ generate the PAULA file that contains the primary text of the document graph. (PAULA documents can have more than one primary text, but discoursegraphs only works with documents that are based on exactly one primary text.) Example ------- <?xml version="1.0" standalone="no"?> <!DOCTYPE paula SYSTEM "paula_text.dtd"> <paula version="1.1"> <header paula_id="maz-1423.text" type="text"/> <body>Zum Angewöhnen ...</body> </paula> """ paula_id = '{0}.{1}.text'.format(self.corpus_name, self.name) E, tree = gen_paula_etree(paula_id) tree.append(E.body(get_text(self.dg))) self.files[paula_id] = tree self.file2dtd[paula_id] = PaulaDTDs.text return paula_id
def sign_request(self, url, method, body, headers): """Sign a request. :param url: The URL to which the request is to be sent. :param headers: The headers in the request. These will be updated with the signature. """ # The use of PLAINTEXT here was copied from MAAS, but we should switch # to HMAC once it works server-side. client = oauth1.Client( self.consumer_key, self.consumer_secret, self.token_key, self.token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, realm=self.realm) # To preserve API backward compatibility convert an empty string body # to `None`. The old "oauth" library would treat the empty string as # "no body", but "oauthlib" requires `None`. body = None if body is None or len(body) == 0 else body uri, signed_headers, body = client.sign(url, method, body, headers) headers.update(signed_headers)
Sign a request. :param url: The URL to which the request is to be sent. :param headers: The headers in the request. These will be updated with the signature.
Below is the the instruction that describes the task: ### Input: Sign a request. :param url: The URL to which the request is to be sent. :param headers: The headers in the request. These will be updated with the signature. ### Response: def sign_request(self, url, method, body, headers): """Sign a request. :param url: The URL to which the request is to be sent. :param headers: The headers in the request. These will be updated with the signature. """ # The use of PLAINTEXT here was copied from MAAS, but we should switch # to HMAC once it works server-side. client = oauth1.Client( self.consumer_key, self.consumer_secret, self.token_key, self.token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, realm=self.realm) # To preserve API backward compatibility convert an empty string body # to `None`. The old "oauth" library would treat the empty string as # "no body", but "oauthlib" requires `None`. body = None if body is None or len(body) == 0 else body uri, signed_headers, body = client.sign(url, method, body, headers) headers.update(signed_headers)
def build(self, validator, namespace, config_key, default, help): """Build or retrieve a ValueProxy from the attributes. Proxies are keyed using a repr because default values can be mutable types. """ proxy_attrs = validator, namespace, config_key, default proxy_key = repr(proxy_attrs) if proxy_key in self.proxies: return self.proxies[proxy_key] value_proxy = proxy.ValueProxy(*proxy_attrs) register_value_proxy(namespace, value_proxy, help) return self.proxies.setdefault(proxy_key, value_proxy)
Build or retrieve a ValueProxy from the attributes. Proxies are keyed using a repr because default values can be mutable types.
Below is the the instruction that describes the task: ### Input: Build or retrieve a ValueProxy from the attributes. Proxies are keyed using a repr because default values can be mutable types. ### Response: def build(self, validator, namespace, config_key, default, help): """Build or retrieve a ValueProxy from the attributes. Proxies are keyed using a repr because default values can be mutable types. """ proxy_attrs = validator, namespace, config_key, default proxy_key = repr(proxy_attrs) if proxy_key in self.proxies: return self.proxies[proxy_key] value_proxy = proxy.ValueProxy(*proxy_attrs) register_value_proxy(namespace, value_proxy, help) return self.proxies.setdefault(proxy_key, value_proxy)
def db_value(self, value): """Convert the python value for storage in the database.""" value = self.transform_value(value) return self.hhash.encrypt(value, salt_size=self.salt_size, rounds=self.rounds)
Convert the python value for storage in the database.
Below is the the instruction that describes the task: ### Input: Convert the python value for storage in the database. ### Response: def db_value(self, value): """Convert the python value for storage in the database.""" value = self.transform_value(value) return self.hhash.encrypt(value, salt_size=self.salt_size, rounds=self.rounds)
def _put_data(self, ud, ase, offsets, data):
    # type: (Uploader, blobxfer.models.upload.Descriptor,
    #        blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.upload.Offsets, bytes) -> None
    """Upload one chunk of data to Azure.

    Dispatches on the entity's storage mode: append blob, block blob
    (including one-shot creation), file share range, or page blob.

    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    :param blobxfer.models.azure.StorageEntity ase: Storage entity
    :param blobxfer.models.upload.Offsets offsets: offsets
    :param bytes data: data to upload
    """
    mode = ase.mode
    if mode == blobxfer.models.azure.StorageModes.Append:
        # append a block to an append blob
        if data is not None:
            blobxfer.operations.azure.blob.append.append_block(ase, data)
        return
    if mode == blobxfer.models.azure.StorageModes.Block:
        if ud.is_one_shot_block_blob:
            # one-shot upload: create the blob directly from the payload
            metadata = ud.generate_metadata()
            if not ud.entity.is_encrypted and ud.must_compute_md5:
                digest = blobxfer.util.base64_encode_as_string(
                    ud.md5.digest())
            else:
                digest = None
            blobxfer.operations.azure.blob.block.create_blob(
                ase, data, digest, metadata)
        elif data is not None:
            # multi-part upload: put one block of the blob
            blobxfer.operations.azure.blob.block.put_block(
                ase, offsets, data)
        return
    if mode == blobxfer.models.azure.StorageModes.File:
        # upload a range of an Azure file
        if data is not None:
            blobxfer.operations.azure.file.put_file_range(
                ase, offsets, data)
        return
    if mode == blobxfer.models.azure.StorageModes.Page:
        if data is None:
            return
        # pad the payload out to the page boundary
        aligned = blobxfer.util.page_align_content_length(
            offsets.num_bytes)
        if aligned != offsets.num_bytes:
            data = data.ljust(aligned, b'\0')
        # all-zero pages need not be uploaded
        if blobxfer.operations.md5.check_data_is_empty(data):
            return
        blobxfer.operations.azure.blob.page.put_page(
            ase, offsets.range_start,
            offsets.range_start + aligned - 1, data)
Put data in Azure :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor :param blobxfer.models.azure.StorageEntity ase: Storage entity :param blobxfer.models.upload.Offsets offsets: offsets :param bytes data: data to upload
Below is the instruction that describes the task: ### Input: Put data in Azure :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor :param blobxfer.models.azure.StorageEntity ase: Storage entity :param blobxfer.models.upload.Offsets offsets: offsets :param bytes data: data to upload ### Response: def _put_data(self, ud, ase, offsets, data): # type: (Uploader, blobxfer.models.upload.Descriptor, # blobxfer.models.azure.StorageEntity, # blobxfer.models.upload.Offsets, bytes) -> None """Put data in Azure :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor :param blobxfer.models.azure.StorageEntity ase: Storage entity :param blobxfer.models.upload.Offsets offsets: offsets :param bytes data: data to upload """ if ase.mode == blobxfer.models.azure.StorageModes.Append: # append block if data is not None: blobxfer.operations.azure.blob.append.append_block(ase, data) elif ase.mode == blobxfer.models.azure.StorageModes.Block: # handle one-shot uploads if ud.is_one_shot_block_blob: metadata = ud.generate_metadata() if not ud.entity.is_encrypted and ud.must_compute_md5: digest = blobxfer.util.base64_encode_as_string( ud.md5.digest()) else: digest = None blobxfer.operations.azure.blob.block.create_blob( ase, data, digest, metadata) return # upload block if data is not None: blobxfer.operations.azure.blob.block.put_block( ase, offsets, data) elif ase.mode == blobxfer.models.azure.StorageModes.File: # upload range if data is not None: blobxfer.operations.azure.file.put_file_range( ase, offsets, data) elif ase.mode == blobxfer.models.azure.StorageModes.Page: if data is None: return # compute aligned size aligned = blobxfer.util.page_align_content_length( offsets.num_bytes) # align page if aligned != offsets.num_bytes: data = data.ljust(aligned, b'\0') if blobxfer.operations.md5.check_data_is_empty(data): return # upload page blobxfer.operations.azure.blob.page.put_page( ase, offsets.range_start, offsets.range_start + aligned - 1, data)
def read(keypath, configfile=None):
    """
    Look up a value in the configuration file.

    Args:
        keypath: str
            Specifies the key for which the value is desired. It can be a
            hierarchical path. Example: "section1.subsection.key1"
        configfile: str
            Path to the config file to read. Defaults to None, in which
            case the application's default config file is used.

    Returns:
        value from configuration file
    """
    # Reuse a cached AppConfig per config file path; build one on a miss.
    try:
        appconfig = _configs[configfile]
    except KeyError:
        appconfig = _configs[configfile] = AppConfig(configfile=configfile)
    return appconfig.read(keypath)
Reads a value from the configuration file. Args: keypath: str Specifies the key for which the value is desired. It can be a hierarchical path. Example: "section1.subsection.key1" configfile: str Path to the config file to read. Defaults to None, in which case the application's default config file is used. Returns: value from configuration file
Below is the instruction that describes the task: ### Input: Reads a value from the configuration file. Args: keypath: str Specifies the key for which the value is desired. It can be a hierarchical path. Example: "section1.subsection.key1" configfile: str Path to the config file to read. Defaults to None, in which case the application's default config file is used. Returns: value from configuration file ### Response: def read(keypath, configfile=None): """ Reads a value from the configuration file. Args: keypath: str Specifies the key for which the value is desired. It can be a hierarchical path. Example: "section1.subsection.key1" configfile: str Path to the config file to read. Defaults to None, in which case the application's default config file is used. Returns: value from configuration file """ if configfile in _configs: appconfig = _configs[configfile] else: appconfig = AppConfig(configfile=configfile) _configs[configfile] = appconfig return appconfig.read(keypath)