code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _compile_messages(self):
    '''
    Create a list of all OS messages and their compiled regexs.

    Populates ``self.compiled_messages`` from ``self._config['messages']``.
    Each entry either carries a ``__python_fun__`` callback, or a regex
    compiled from the message's ``line`` template together with the
    positional metadata needed to extract the template's values.
    '''
    self.compiled_messages = []
    if not self._config:
        return
    for message_dict in self._config.get('messages', {}):
        error = message_dict['error']
        tag = message_dict['tag']
        model = message_dict['model']
        match_on = message_dict.get('match_on', 'tag')
        if '__python_fun__' in message_dict:
            # Message handled by a custom Python function: no regex needed.
            self.compiled_messages.append({
                'error': error,
                'tag': tag,
                'match_on': match_on,
                'model': model,
                '__python_fun__': message_dict['__python_fun__']
            })
            continue
        values = message_dict['values']
        line = message_dict['line']
        mapping = message_dict['mapping']
        # We will now figure out which position each value is in so we can
        # use it with the match statement
        position = {}
        replace = {}
        # Iterate over a snapshot of the keys: the loop body renames keys
        # ('old|new' -> 'new') via pop/insert, and mutating a dict while
        # iterating its live key view raises RuntimeError on Python 3.
        for key in list(values):
            if '|' in key:
                new_key, replace[new_key] = key.replace(' ', '').split('|')
                values[new_key] = values.pop(key)
                key = new_key
            position[line.find('{' + key + '}')] = key
        sorted_position = {}
        for i, elem in enumerate(sorted(position.items())):
            sorted_position[elem[1]] = i + 1
        # Escape the line, then remove the escape for the curly brackets so
        # they can be used when formatting
        escaped = re.escape(line).replace(r'\{', '{').replace(r'\}', '}')
        # Replace a whitespace with \s+
        escaped = escaped.replace(r'\ ', r'\s+')
        self.compiled_messages.append(
            {
                'error': error,
                'tag': tag,
                'match_on': match_on,
                'line': re.compile(escaped.format(**values)),
                'positions': sorted_position,
                'values': values,
                'replace': replace,
                'model': model,
                'mapping': mapping
            }
        )
    log.debug('Compiled messages:')
    log.debug(self.compiled_messages)
Create a list of all OS messages and their compiled regexs
Below is the the instruction that describes the task: ### Input: Create a list of all OS messages and their compiled regexs ### Response: def _compile_messages(self): ''' Create a list of all OS messages and their compiled regexs ''' self.compiled_messages = [] if not self._config: return for message_dict in self._config.get('messages', {}): error = message_dict['error'] tag = message_dict['tag'] model = message_dict['model'] match_on = message_dict.get('match_on', 'tag') if '__python_fun__' in message_dict: self.compiled_messages.append({ 'error': error, 'tag': tag, 'match_on': match_on, 'model': model, '__python_fun__': message_dict['__python_fun__'] }) continue values = message_dict['values'] line = message_dict['line'] mapping = message_dict['mapping'] # We will now figure out which position each value is in so we can use it with the match statement position = {} replace = {} for key in values.keys(): if '|' in key: new_key, replace[new_key] = key.replace(' ', '').split('|') values[new_key] = values.pop(key) key = new_key position[line.find('{' + key + '}')] = key sorted_position = {} for i, elem in enumerate(sorted(position.items())): sorted_position[elem[1]] = i + 1 # Escape the line, then remove the escape for the curly bracets so they can be used when formatting escaped = re.escape(line).replace(r'\{', '{').replace(r'\}', '}') # Replace a whitespace with \s+ escaped = escaped.replace(r'\ ', r'\s+') self.compiled_messages.append( { 'error': error, 'tag': tag, 'match_on': match_on, 'line': re.compile(escaped.format(**values)), 'positions': sorted_position, 'values': values, 'replace': replace, 'model': model, 'mapping': mapping } ) log.debug('Compiled messages:') log.debug(self.compiled_messages)
def _column_pad_filter(self, next_filter): """ Expand blank lines caused from overflow of other columns to blank whitespace. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], [<blank>, "2b"], ["1b", "2c"], [<blank>, "2d"] ] """ next(next_filter) while True: line = list((yield)) for i, col in enumerate(line): if col is None: line[i] = self._get_blank_cell(i) next_filter.send(line)
Expand blank lines caused from overflow of other columns to blank whitespace. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], [<blank>, "2b"], ["1b", "2c"], [<blank>, "2d"] ]
Below is the the instruction that describes the task: ### Input: Expand blank lines caused from overflow of other columns to blank whitespace. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], [<blank>, "2b"], ["1b", "2c"], [<blank>, "2d"] ] ### Response: def _column_pad_filter(self, next_filter): """ Expand blank lines caused from overflow of other columns to blank whitespace. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], [<blank>, "2b"], ["1b", "2c"], [<blank>, "2d"] ] """ next(next_filter) while True: line = list((yield)) for i, col in enumerate(line): if col is None: line[i] = self._get_blank_cell(i) next_filter.send(line)
def cancel_email_change(self):
    """
    Cancel email change for new users and roll back data.

    No-op when no email change is pending (``email_new`` is falsy).
    Otherwise clears the pending address and confirmation-link state and
    marks the current address as confirmed again.
    """
    if not self.email_new:
        return
    # The original set email_new to None twice; the duplicate assignment
    # has been removed.
    self.email_new = None
    self.email_confirmed = True
    self.email_link = None
    self.email_link_expires = None
Cancel email change for new users and roll back data
Below is the the instruction that describes the task: ### Input: Cancel email change for new users and roll back data ### Response: def cancel_email_change(self): """ Cancel email change for new users and roll back data """ if not self.email_new: return self.email_new = None self.email_confirmed = True self.email_link = None self.email_new = None self.email_link_expires = None
def deepupdate(original, update, levels=5):
    """Update, like dict.update, but deeper.

    Update 'original' from dict/iterable 'update'. I.e., it recurses on
    dicts 'levels' times if necessary. A standard dict.update is levels=0.
    """
    if not isinstance(update, dict):
        update = dict(update)
    if levels <= 0:
        # No recursion budget left: behave exactly like dict.update.
        original.update(update)
        return
    for key, val in update.items():
        target = original.get(key)
        if isinstance(target, dict):
            # might need a force=True to override this
            if not isinstance(val, dict):
                raise TypeError("Trying to update dict %s with "
                                "non-dict %s" % (original[key], val))
            deepupdate(target, val, levels=levels - 1)
        else:
            original[key] = val
Update, like dict.update, but deeper. Update 'original' from dict/iterable 'update'. I.e., it recurses on dicts 'levels' times if necessary. A standard dict.update is levels=0
Below is the the instruction that describes the task: ### Input: Update, like dict.update, but deeper. Update 'original' from dict/iterable 'update'. I.e., it recurses on dicts 'levels' times if necessary. A standard dict.update is levels=0 ### Response: def deepupdate(original, update, levels=5): """Update, like dict.update, but deeper. Update 'original' from dict/iterable 'update'. I.e., it recurses on dicts 'levels' times if necessary. A standard dict.update is levels=0 """ if not isinstance(update, dict): update = dict(update) if not levels > 0: original.update(update) else: for key, val in update.items(): if isinstance(original.get(key), dict): # might need a force=True to override this if not isinstance(val, dict): raise TypeError("Trying to update dict %s with " "non-dict %s" % (original[key], val)) deepupdate(original[key], val, levels=levels-1) else: original.update({key: val})
def export(self, output_folder):
    """Export matrices as ``*.npy`` files to an output folder.

    Creates ``output_folder`` (and any missing parents) if it does not
    exist, then delegates the actual writing to
    ``self._interact_with_folder`` in write mode.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(output_folder, exist_ok=True)
    self._interact_with_folder(output_folder, 'w')
Export matrices as ``*.npy`` files to an output folder.
Below is the the instruction that describes the task: ### Input: Export matrices as ``*.npy`` files to an output folder. ### Response: def export(self, output_folder): """Export matrices as ``*.npy`` files to an output folder.""" if not os.path.exists(output_folder): os.makedirs(output_folder) self._interact_with_folder(output_folder, 'w')
def refund_order(self, order_id, **params):
    """https://developers.coinbase.com/api/v2#refund-an-order"""
    # Validate that every required parameter was supplied.
    required_keys = ('currency',)
    missing = [key for key in required_keys if key not in params]
    if missing:
        raise ValueError("Missing required parameter: %s" % missing[0])
    response = self._post('v2', 'orders', order_id, 'refund', data=params)
    return self._make_api_object(response, Order)
https://developers.coinbase.com/api/v2#refund-an-order
Below is the the instruction that describes the task: ### Input: https://developers.coinbase.com/api/v2#refund-an-order ### Response: def refund_order(self, order_id, **params): """https://developers.coinbase.com/api/v2#refund-an-order""" for required in ['currency']: if required not in params: raise ValueError("Missing required parameter: %s" % required) response = self._post('v2', 'orders', order_id, 'refund', data=params) return self._make_api_object(response, Order)
def fprint(fmt, *args, **kwargs):
    """Parse and print a colored and perhaps formatted string.

    The remaining keyword arguments are the same as for Python's built-in
    print function. Colors are returning to their defaults before the
    function returns.
    """
    if not fmt:
        return
    target = kwargs.get("target", sys.stdout)
    # Format the string before feeding it to the parser
    formatted = fmt.format(*args, **kwargs)
    color_active = False
    for txt, markups in _color_format_parser.parse(formatted):
        if markups == (None, None):
            # Plain segment: drop back to default colors if needed.
            if color_active:
                _color_manager.set_defaults()
                color_active = False
        else:
            _color_manager.set_color(*markups)
            color_active = True
        target.write(txt)
        target.flush()  # Needed for Python 3.x
    _color_manager.set_defaults()
    target.write(kwargs.get('end', '\n'))
    _color_manager.set_defaults()
Parse and print a colored and perhaps formatted string. The remaining keyword arguments are the same as for Python's built-in print function. Colors are returning to their defaults before the function returns.
Below is the the instruction that describes the task: ### Input: Parse and print a colored and perhaps formatted string. The remaining keyword arguments are the same as for Python's built-in print function. Colors are returning to their defaults before the function returns. ### Response: def fprint(fmt, *args, **kwargs): """Parse and print a colored and perhaps formatted string. The remaining keyword arguments are the same as for Python's built-in print function. Colors are returning to their defaults before the function returns. """ if not fmt: return hascolor = False target = kwargs.get("target", sys.stdout) # Format the string before feeding it to the parser fmt = fmt.format(*args, **kwargs) for txt, markups in _color_format_parser.parse(fmt): if markups != (None, None): _color_manager.set_color(*markups) hascolor = True else: if hascolor: _color_manager.set_defaults() hascolor = False target.write(txt) target.flush() # Needed for Python 3.x _color_manager.set_defaults() target.write(kwargs.get('end', '\n')) _color_manager.set_defaults()
def create_primes(threshold):
    """
    Generate prime values using sieve of Eratosthenes method.

    Args:
        threshold (int): The upper bound for the size of the prime values.

    Returns (List[int]):
        All primes from 2 and up to ``threshold``.
    """
    if threshold == 2:
        return [2]
    elif threshold < 2:
        return []
    # Sieve over odd candidates only; index i represents the value 2*i + 3.
    numbers = list(range(3, threshold + 1, 2))
    root_of_threshold = threshold ** 0.5
    # Count of odd candidates. Integer arithmetic replaces the original
    # float expression int((threshold+1)/2-1), which can lose precision
    # for very large thresholds.
    half = (threshold + 1) // 2 - 1
    idx = 0
    counter = 3
    while counter <= root_of_threshold:
        if numbers[idx]:
            # Start crossing off at counter**2: smaller multiples were
            # already eliminated by smaller primes. (The original's extra
            # pre-loop `numbers[idy] = 0` was redundant: idy < half always
            # holds here, so the while loop re-zeroes the same index.)
            idy = (counter * counter - 3) // 2
            while idy < half:
                numbers[idy] = 0
                idy += counter
        idx += 1
        counter = 2 * idx + 3
    return [2] + [number for number in numbers if number]
Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``.
Below is the the instruction that describes the task: ### Input: Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``. ### Response: def create_primes(threshold): """ Generate prime values using sieve of Eratosthenes method. Args: threshold (int): The upper bound for the size of the prime values. Returns (List[int]): All primes from 2 and up to ``threshold``. """ if threshold == 2: return [2] elif threshold < 2: return [] numbers = list(range(3, threshold+1, 2)) root_of_threshold = threshold ** 0.5 half = int((threshold+1)/2-1) idx = 0 counter = 3 while counter <= root_of_threshold: if numbers[idx]: idy = int((counter*counter-3)/2) numbers[idy] = 0 while idy < half: numbers[idy] = 0 idy += counter idx += 1 counter = 2*idx+3 return [2] + [number for number in numbers if number]
def addsalt(self, salt): """ adds a salt to the hash function (OPTIONAL) should be called AFTER Init, and BEFORE update salt: a bytestring, length determined by hashbitlen. if not of sufficient length, the bytestring will be assumed to be a big endian number and prefixed with an appropriate number of null bytes, and if too large, only the low order bytes will be used. if hashbitlen=224 or 256, then salt will be 16 bytes if hashbitlen=384 or 512, then salt will be 32 bytes """ # fail if addsalt() was not called at the right time if self.state != 1: raise Exception('addsalt() not called after init() and before update()') # salt size is to be 4x word size saltsize = self.WORDBYTES * 4 # if too short, prefix with null bytes. if too long, # truncate high order bytes if len(salt) < saltsize: salt = (chr(0)*(saltsize-len(salt)) + salt) else: salt = salt[-saltsize:] # prep the salt array self.salt[0] = self.byte2int(salt[ : 4<<self.mul]) self.salt[1] = self.byte2int(salt[ 4<<self.mul: 8<<self.mul]) self.salt[2] = self.byte2int(salt[ 8<<self.mul:12<<self.mul]) self.salt[3] = self.byte2int(salt[12<<self.mul: ])
adds a salt to the hash function (OPTIONAL) should be called AFTER Init, and BEFORE update salt: a bytestring, length determined by hashbitlen. if not of sufficient length, the bytestring will be assumed to be a big endian number and prefixed with an appropriate number of null bytes, and if too large, only the low order bytes will be used. if hashbitlen=224 or 256, then salt will be 16 bytes if hashbitlen=384 or 512, then salt will be 32 bytes
Below is the the instruction that describes the task: ### Input: adds a salt to the hash function (OPTIONAL) should be called AFTER Init, and BEFORE update salt: a bytestring, length determined by hashbitlen. if not of sufficient length, the bytestring will be assumed to be a big endian number and prefixed with an appropriate number of null bytes, and if too large, only the low order bytes will be used. if hashbitlen=224 or 256, then salt will be 16 bytes if hashbitlen=384 or 512, then salt will be 32 bytes ### Response: def addsalt(self, salt): """ adds a salt to the hash function (OPTIONAL) should be called AFTER Init, and BEFORE update salt: a bytestring, length determined by hashbitlen. if not of sufficient length, the bytestring will be assumed to be a big endian number and prefixed with an appropriate number of null bytes, and if too large, only the low order bytes will be used. if hashbitlen=224 or 256, then salt will be 16 bytes if hashbitlen=384 or 512, then salt will be 32 bytes """ # fail if addsalt() was not called at the right time if self.state != 1: raise Exception('addsalt() not called after init() and before update()') # salt size is to be 4x word size saltsize = self.WORDBYTES * 4 # if too short, prefix with null bytes. if too long, # truncate high order bytes if len(salt) < saltsize: salt = (chr(0)*(saltsize-len(salt)) + salt) else: salt = salt[-saltsize:] # prep the salt array self.salt[0] = self.byte2int(salt[ : 4<<self.mul]) self.salt[1] = self.byte2int(salt[ 4<<self.mul: 8<<self.mul]) self.salt[2] = self.byte2int(salt[ 8<<self.mul:12<<self.mul]) self.salt[3] = self.byte2int(salt[12<<self.mul: ])
def tile_to_quadkey(tile, level):
    """Transform tile coordinates to a quadkey.

    Args:
        tile: an indexable pair ``(tile_x, tile_y)`` of tile coordinates.
        level: the zoom level (number of quadkey digits to emit).

    Returns:
        str: the quadkey, one base-4 digit ('0'-'3') per level.
    """
    tile_x = tile[0]
    tile_y = tile[1]
    digits = []
    # Fixes from the original: `xrange` is a NameError on Python 3 (use
    # range), and `is not 0` compared identity rather than value (use !=).
    for i in range(level):
        bit = level - i
        digit = ord('0')
        mask = 1 << (bit - 1)
        if (tile_x & mask) != 0:
            digit += 1
        if (tile_y & mask) != 0:
            digit += 2
        digits.append(chr(digit))
    # join() avoids the quadratic string += of the original.
    return ''.join(digits)
Transform tile coordinates to a quadkey
Below is the the instruction that describes the task: ### Input: Transform tile coordinates to a quadkey ### Response: def tile_to_quadkey(tile, level): """Transform tile coordinates to a quadkey""" tile_x = tile[0] tile_y = tile[1] quadkey = "" for i in xrange(level): bit = level - i digit = ord('0') mask = 1 << (bit - 1) # if (bit - 1) > 0 else 1 >> (bit - 1) if (tile_x & mask) is not 0: digit += 1 if (tile_y & mask) is not 0: digit += 2 quadkey += chr(digit) return quadkey
def InsertFloatArg(self, string="", **_):
    """Inserts a Float argument."""
    # The InsertArg call stays inside the try so that any TypeError or
    # ValueError it raises is also reported as a ParseError, matching the
    # original behavior.
    try:
        return self.InsertArg(float(string))
    except (TypeError, ValueError):
        raise ParseError("%s is not a valid float." % string)
Inserts a Float argument.
Below is the the instruction that describes the task: ### Input: Inserts a Float argument. ### Response: def InsertFloatArg(self, string="", **_): """Inserts a Float argument.""" try: float_value = float(string) return self.InsertArg(float_value) except (TypeError, ValueError): raise ParseError("%s is not a valid float." % string)
def changed(self, thresh=0.05, idx=True):
    """
    Changed features.

    {threshdoc}
    """
    # Boolean mask of rows whose p-value is at or below the threshold.
    mask = self.data[self.pval_column] <= thresh
    return mask if idx else self[mask]
Changed features. {threshdoc}
Below is the the instruction that describes the task: ### Input: Changed features. {threshdoc} ### Response: def changed(self, thresh=0.05, idx=True): """ Changed features. {threshdoc} """ ind = self.data[self.pval_column] <= thresh if idx: return ind return self[ind]
def _init_orient(self):
    """Retrieve the quadrature points and weights if needed.
    """
    # Quadrature points are only needed for fixed-average orientation;
    # other orientation modes skip the precomputation.
    if self.orient == orientation.orient_averaged_fixed:
        (self.beta_p, self.beta_w) = quadrature.get_points_and_weights(
            self.or_pdf, 0, 180, self.n_beta)
    # NOTE(review): the flattened source makes the indentation ambiguous;
    # this call is reconstructed as unconditional (signature refreshed for
    # every orientation mode) -- confirm against upstream.
    self._set_orient_signature()
Retrieve the quadrature points and weights if needed.
Below is the the instruction that describes the task: ### Input: Retrieve the quadrature points and weights if needed. ### Response: def _init_orient(self): """Retrieve the quadrature points and weights if needed. """ if self.orient == orientation.orient_averaged_fixed: (self.beta_p, self.beta_w) = quadrature.get_points_and_weights( self.or_pdf, 0, 180, self.n_beta) self._set_orient_signature()
def get_parent(self, update=False):
    """:returns: the parent node of the current node object."""
    if not self._meta.proxy_for_model:
        # Plain model: just follow the 'parent' relation.
        return self.parent
    # The current node is a proxy model; the returned parent should be the
    # same proxy model, so we explicitly fetch it as an instance of that
    # model rather than following the 'parent' relation.
    if self.parent_id is None:
        return None
    return self.__class__.objects.get(pk=self.parent_id)
:returns: the parent node of the current node object.
Below is the the instruction that describes the task: ### Input: :returns: the parent node of the current node object. ### Response: def get_parent(self, update=False): """:returns: the parent node of the current node object.""" if self._meta.proxy_for_model: # the current node is a proxy model; the returned parent # should be the same proxy model, so we need to explicitly # fetch it as an instance of that model rather than simply # following the 'parent' relation if self.parent_id is None: return None else: return self.__class__.objects.get(pk=self.parent_id) else: return self.parent
def request_vpc_peering_connection(name, requester_vpc_id=None,
                                   requester_vpc_name=None, peer_vpc_id=None,
                                   peer_vpc_name=None, conn_name=None,
                                   peer_owner_id=None, peer_region=None,
                                   region=None, key=None, keyid=None,
                                   profile=None):
    '''
    Request a VPC peering connection, unless one named ``conn_name``
    already exists.

    name
        Name of the state

    requester_vpc_id
        ID of the requesting VPC. Exclusive with requester_vpc_name.
        String type.

    requester_vpc_name
        Name tag of the requesting VPC. Exclusive with requester_vpc_id.
        String type.

    peer_vpc_id
        ID of the VPC to create VPC peering connection with. This can be
        a VPC in another account. Exclusive with peer_vpc_name.
        String type.

    peer_vpc_name
        Name tag of the VPC to create VPC peering connection with. This
        can only be a VPC in the same account and region. Exclusive with
        peer_vpc_id. String type.

    conn_name
        The (optional) name to use for this VPC peering connection.
        String type.

    peer_owner_id
        ID of the owner of the peer VPC. String type. If this isn't
        supplied AWS uses your account ID. Required if peering to a
        different account.

    peer_region
        Region of peer VPC. For inter-region vpc peering connections. Not
        required for intra-region peering connections.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    .. versionadded:: 2016.11.0

    Example:

    .. code-block:: yaml

        request a vpc peering connection:
          boto_vpc.request_vpc_peering_connection:
            - requester_vpc_id: vpc-4b3522e
            - peer_vpc_id: vpc-ae83f9ca
            - conn_name: salt_peering_connection
    '''
    log.debug('Called state to request VPC peering connection')
    ret = {
        'name': name,
        'result': True,
        'changes': {},
        'comment': 'Boto VPC peering state'
    }
    # Look up any existing peering connection by its name tag; without a
    # conn_name there is nothing to deduplicate against.
    if conn_name:
        vpc_ids = __salt__['boto_vpc.describe_vpc_peering_connection'](
            conn_name,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile
        ).get('VPC-Peerings', [])
    else:
        vpc_ids = []
    # Idempotence: an existing connection with this name means no work.
    if vpc_ids:
        ret['comment'] = ('VPC peering connection already exists, '
                          'nothing to be done.')
        return ret
    # In test mode, report what would happen without calling the module.
    if __opts__['test']:
        if not vpc_ids:
            ret['comment'] = 'VPC peering connection will be created'
            return ret
    log.debug('Called module to create VPC peering connection')
    result = __salt__['boto_vpc.request_vpc_peering_connection'](
        requester_vpc_id,
        requester_vpc_name,
        peer_vpc_id,
        peer_vpc_name,
        name=conn_name,
        peer_owner_id=peer_owner_id,
        peer_region=peer_region,
        region=region,
        key=key,
        keyid=keyid,
        profile=profile
    )
    if 'error' in result:
        ret['comment'] = "Failed to request VPC peering: {0}".format(result['error'])
        ret['result'] = False
        return ret
    ret['changes'].update({
        'old': '',
        'new': result['msg']
    })
    return ret
name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. String type. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. String type. peer_vpc_id ID of the VPC tp crete VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. String type. peer_vpc_name Name tag of the VPC tp crete VPC peering connection with. This can only be a VPC the same account and region. Exclusive with peer_vpc_id. String type. conn_name The (optional) name to use for this VPC peering connection. String type. peer_owner_id ID of the owner of the peer VPC. String type. If this isn't supplied AWS uses your account ID. Required if peering to a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml request a vpc peering connection: boto_vpc.request_vpc_peering_connection: - requester_vpc_id: vpc-4b3522e - peer_vpc_id: vpc-ae83f9ca - conn_name: salt_peering_connection
Below is the the instruction that describes the task: ### Input: name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. String type. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. String type. peer_vpc_id ID of the VPC tp crete VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. String type. peer_vpc_name Name tag of the VPC tp crete VPC peering connection with. This can only be a VPC the same account and region. Exclusive with peer_vpc_id. String type. conn_name The (optional) name to use for this VPC peering connection. String type. peer_owner_id ID of the owner of the peer VPC. String type. If this isn't supplied AWS uses your account ID. Required if peering to a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml request a vpc peering connection: boto_vpc.request_vpc_peering_connection: - requester_vpc_id: vpc-4b3522e - peer_vpc_id: vpc-ae83f9ca - conn_name: salt_peering_connection ### Response: def request_vpc_peering_connection(name, requester_vpc_id=None, requester_vpc_name=None, peer_vpc_id=None, peer_vpc_name=None, conn_name=None, peer_owner_id=None, peer_region=None, region=None, key=None, keyid=None, profile=None): ''' name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. String type. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. String type. peer_vpc_id ID of the VPC tp crete VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. String type. 
peer_vpc_name Name tag of the VPC tp crete VPC peering connection with. This can only be a VPC the same account and region. Exclusive with peer_vpc_id. String type. conn_name The (optional) name to use for this VPC peering connection. String type. peer_owner_id ID of the owner of the peer VPC. String type. If this isn't supplied AWS uses your account ID. Required if peering to a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml request a vpc peering connection: boto_vpc.request_vpc_peering_connection: - requester_vpc_id: vpc-4b3522e - peer_vpc_id: vpc-ae83f9ca - conn_name: salt_peering_connection ''' log.debug('Called state to request VPC peering connection') ret = { 'name': name, 'result': True, 'changes': {}, 'comment': 'Boto VPC peering state' } if conn_name: vpc_ids = __salt__['boto_vpc.describe_vpc_peering_connection']( conn_name, region=region, key=key, keyid=keyid, profile=profile ).get('VPC-Peerings', []) else: vpc_ids = [] if vpc_ids: ret['comment'] = ('VPC peering connection already exists, ' 'nothing to be done.') return ret if __opts__['test']: if not vpc_ids: ret['comment'] = 'VPC peering connection will be created' return ret log.debug('Called module to create VPC peering connection') result = __salt__['boto_vpc.request_vpc_peering_connection']( requester_vpc_id, requester_vpc_name, peer_vpc_id, peer_vpc_name, name=conn_name, peer_owner_id=peer_owner_id, peer_region=peer_region, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result: ret['comment'] = "Failed to request VPC peering: {0}".format(result['error']) ret['result'] = False return ret ret['changes'].update({ 'old': '', 'new': 
result['msg'] }) return ret
def verify_verify(self, id, token):
    """Verify the token of a specific verification."""
    # Fetch the verification resource and deserialize it into a Verify
    # model instance.
    response = self.request('verify/' + str(id), params={'token': token})
    return Verify().load(response)
Verify the token of a specific verification.
Below is the the instruction that describes the task: ### Input: Verify the token of a specific verification. ### Response: def verify_verify(self, id, token): """Verify the token of a specific verification.""" return Verify().load(self.request('verify/' + str(id), params={'token': token}))
def encode_vlq(i):
    """
    Encode integer `i` into a VLQ encoded string.
    """
    # Shift the sign into the least significant bit.
    value = (i << 1) if i >= 0 else ((-i << 1) + 1)
    if value < VLQ_MULTI_CHAR:
        # Fits in a single base64 digit: no continuation needed.
        return INT_B64[value]
    units = []
    while value:
        # Emit the low bits with the continuation flag set.
        units.append((value & VLQ_BASE_MASK) | VLQ_CONT)
        value >>= VLQ_SHIFT
    # Clear the continuation flag on the final unit.
    units[-1] &= VLQ_BASE_MASK
    return ''.join(INT_B64[u] for u in units)
Encode integer `i` into a VLQ encoded string.
Below is the the instruction that describes the task: ### Input: Encode integer `i` into a VLQ encoded string. ### Response: def encode_vlq(i): """ Encode integer `i` into a VLQ encoded string. """ # shift in the sign to least significant bit raw = (-i << 1) + 1 if i < 0 else i << 1 if raw < VLQ_MULTI_CHAR: # short-circuit simple case as it doesn't need continuation return INT_B64[raw] result = [] while raw: # assume continue result.append(raw & VLQ_BASE_MASK | VLQ_CONT) # shift out processed bits raw = raw >> VLQ_SHIFT # discontinue the last unit result[-1] &= VLQ_BASE_MASK return ''.join(INT_B64[i] for i in result)
def refresh(self):
    """
    Flush the canvas content to the underlying screen.
    """
    buffer_, dx, dy = self._buffer, self._dx, self._dy
    self._screen.block_transfer(buffer_, dx, dy)
Flush the canvas content to the underlying screen.
Below is the the instruction that describes the task: ### Input: Flush the canvas content to the underlying screen. ### Response: def refresh(self): """ Flush the canvas content to the underlying screen. """ self._screen.block_transfer(self._buffer, self._dx, self._dy)
def on_menu(self, event):
    '''called on menu event'''
    state = self.state
    popup = self.popup_menu
    if popup is not None:
        selection = popup.find_selected(event)
        if selection is not None:
            # Remember where the popup was shown when it was selected.
            selection.popup_pos = self.popup_pos
            key = selection.returnkey
            if key == 'fitWindow':
                self.fit_to_window()
            elif key == 'fullSize':
                self.full_size()
            else:
                # Anything else is forwarded to the consumer queue.
                state.out_queue.put(selection)
            return
    if self.menu is not None:
        selection = self.menu.find_selected(event)
        if selection is not None:
            state.out_queue.put(selection)
            return
called on menu event
Below is the the instruction that describes the task: ### Input: called on menu event ### Response: def on_menu(self, event): '''called on menu event''' state = self.state if self.popup_menu is not None: ret = self.popup_menu.find_selected(event) if ret is not None: ret.popup_pos = self.popup_pos if ret.returnkey == 'fitWindow': self.fit_to_window() elif ret.returnkey == 'fullSize': self.full_size() else: state.out_queue.put(ret) return if self.menu is not None: ret = self.menu.find_selected(event) if ret is not None: state.out_queue.put(ret) return
def stacked_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None, add_l2_losses=False): """ Number of convolutional layers stacked on top of each other Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the ouput of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether is training phase now or not. It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) add_l2_losses: whether to add l2 losses on network kernels to tf.GraphKeys.REGULARIZATION_LOSSES or not Returns: units: tensor at the output of the last convolutional layer """ l2_reg = tf.nn.l2_loss if add_l2_losses else None for n_layer, n_hidden in enumerate(n_hidden_list): if use_dilation: dilation_rate = 2 ** n_layer else: dilation_rate = 1 units = tf.layers.conv1d(units, n_hidden, filter_width, padding='same', dilation_rate=dilation_rate, kernel_initializer=INITIALIZER(), kernel_regularizer=l2_reg) if use_batch_norm: assert training_ph is not None units = tf.layers.batch_normalization(units, training=training_ph) units = tf.nn.relu(units) return units
Number of convolutional layers stacked on top of each other Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the ouput of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether is training phase now or not. It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) add_l2_losses: whether to add l2 losses on network kernels to tf.GraphKeys.REGULARIZATION_LOSSES or not Returns: units: tensor at the output of the last convolutional layer
Below is the the instruction that describes the task: ### Input: Number of convolutional layers stacked on top of each other Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the ouput of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether is training phase now or not. It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) add_l2_losses: whether to add l2 losses on network kernels to tf.GraphKeys.REGULARIZATION_LOSSES or not Returns: units: tensor at the output of the last convolutional layer ### Response: def stacked_cnn(units: tf.Tensor, n_hidden_list: List, filter_width=3, use_batch_norm=False, use_dilation=False, training_ph=None, add_l2_losses=False): """ Number of convolutional layers stacked on top of each other Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the ouput of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether is training phase now or not. 
It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) add_l2_losses: whether to add l2 losses on network kernels to tf.GraphKeys.REGULARIZATION_LOSSES or not Returns: units: tensor at the output of the last convolutional layer """ l2_reg = tf.nn.l2_loss if add_l2_losses else None for n_layer, n_hidden in enumerate(n_hidden_list): if use_dilation: dilation_rate = 2 ** n_layer else: dilation_rate = 1 units = tf.layers.conv1d(units, n_hidden, filter_width, padding='same', dilation_rate=dilation_rate, kernel_initializer=INITIALIZER(), kernel_regularizer=l2_reg) if use_batch_norm: assert training_ph is not None units = tf.layers.batch_normalization(units, training=training_ph) units = tf.nn.relu(units) return units
def keys_to_typing(value): """Processes the values that will be typed in the element.""" typing = [] for val in value: if isinstance(val, Keys): typing.append(val) elif isinstance(val, int): val = str(val) for i in range(len(val)): typing.append(val[i]) else: for i in range(len(val)): typing.append(val[i]) return typing
Processes the values that will be typed in the element.
Below is the the instruction that describes the task: ### Input: Processes the values that will be typed in the element. ### Response: def keys_to_typing(value): """Processes the values that will be typed in the element.""" typing = [] for val in value: if isinstance(val, Keys): typing.append(val) elif isinstance(val, int): val = str(val) for i in range(len(val)): typing.append(val[i]) else: for i in range(len(val)): typing.append(val[i]) return typing
def _query_server_pos(self, conn, file_length): """ Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server. """ resp = self._query_server_state(conn, file_length) if resp.status == 200: return (0, file_length) # Completed upload. if resp.status != 308: # This means the server didn't have any state for the given # upload ID, which can happen (for example) if the caller saved # the tracker URI to a file and then tried to restart the transfer # after that upload ID has gone stale. In that case we need to # start a new transfer (and the caller will then save the new # tracker URI to the tracker file). raise ResumableUploadException( 'Got non-308 response (%s) from server state query' % resp.status, ResumableTransferDisposition.START_OVER) got_valid_response = False range_spec = resp.getheader('range') if range_spec: # Parse 'bytes=<from>-<to>' range_spec. m = re.search('bytes=(\d+)-(\d+)', range_spec) if m: server_start = long(m.group(1)) server_end = long(m.group(2)) got_valid_response = True else: # No Range header, which means the server does not yet have # any bytes. Note that the Range header uses inclusive 'from' # and 'to' values. Since Range 0-0 would mean that the server # has byte 0, omitting the Range header is used to indicate that # the server doesn't have any bytes. return self.SERVER_HAS_NOTHING if not got_valid_response: raise ResumableUploadException( 'Couldn\'t parse upload server state query response (%s)' % str(resp.getheaders()), ResumableTransferDisposition.START_OVER) if conn.debug >= 1: print 'Server has: Range: %d - %d.' % (server_start, server_end) return (server_start, server_end)
Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server.
Below is the the instruction that describes the task: ### Input: Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server. ### Response: def _query_server_pos(self, conn, file_length): """ Queries server to find out what bytes it currently has. Returns (server_start, server_end), where the values are inclusive. For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2. Raises ResumableUploadException if problem querying server. """ resp = self._query_server_state(conn, file_length) if resp.status == 200: return (0, file_length) # Completed upload. if resp.status != 308: # This means the server didn't have any state for the given # upload ID, which can happen (for example) if the caller saved # the tracker URI to a file and then tried to restart the transfer # after that upload ID has gone stale. In that case we need to # start a new transfer (and the caller will then save the new # tracker URI to the tracker file). raise ResumableUploadException( 'Got non-308 response (%s) from server state query' % resp.status, ResumableTransferDisposition.START_OVER) got_valid_response = False range_spec = resp.getheader('range') if range_spec: # Parse 'bytes=<from>-<to>' range_spec. m = re.search('bytes=(\d+)-(\d+)', range_spec) if m: server_start = long(m.group(1)) server_end = long(m.group(2)) got_valid_response = True else: # No Range header, which means the server does not yet have # any bytes. Note that the Range header uses inclusive 'from' # and 'to' values. Since Range 0-0 would mean that the server # has byte 0, omitting the Range header is used to indicate that # the server doesn't have any bytes. 
return self.SERVER_HAS_NOTHING if not got_valid_response: raise ResumableUploadException( 'Couldn\'t parse upload server state query response (%s)' % str(resp.getheaders()), ResumableTransferDisposition.START_OVER) if conn.debug >= 1: print 'Server has: Range: %d - %d.' % (server_start, server_end) return (server_start, server_end)
def compute_acl(cls, filename, start_index=None, end_index=None, min_nsamples=10): """Computes the autocorrleation length for all model params and temperatures in the given file. Parameter values are averaged over all walkers at each iteration and temperature. The ACL is then calculated over the averaged chain. Parameters ----------- filename : str Name of a samples file to compute ACLs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. min_nsamples : int, optional Require a minimum number of samples to compute an ACL. If the number of samples per walker is less than this, will just set to ``inf``. Default is 10. Returns ------- dict A dictionary of ntemps-long arrays of the ACLs of each parameter. """ acls = {} with cls._io(filename, 'r') as fp: if end_index is None: end_index = fp.niterations tidx = numpy.arange(fp.ntemps) for param in fp.variable_params: these_acls = numpy.zeros(fp.ntemps) for tk in tidx: samples = fp.read_raw_samples( param, thin_start=start_index, thin_interval=1, thin_end=end_index, temps=tk, flatten=False)[param] # contract the walker dimension using the mean, and flatten # the (length 1) temp dimension samples = samples.mean(axis=1)[0, :] if samples.size < min_nsamples: acl = numpy.inf else: acl = autocorrelation.calculate_acl(samples) if acl <= 0: acl = numpy.inf these_acls[tk] = acl acls[param] = these_acls return acls
Computes the autocorrleation length for all model params and temperatures in the given file. Parameter values are averaged over all walkers at each iteration and temperature. The ACL is then calculated over the averaged chain. Parameters ----------- filename : str Name of a samples file to compute ACLs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. min_nsamples : int, optional Require a minimum number of samples to compute an ACL. If the number of samples per walker is less than this, will just set to ``inf``. Default is 10. Returns ------- dict A dictionary of ntemps-long arrays of the ACLs of each parameter.
Below is the the instruction that describes the task: ### Input: Computes the autocorrleation length for all model params and temperatures in the given file. Parameter values are averaged over all walkers at each iteration and temperature. The ACL is then calculated over the averaged chain. Parameters ----------- filename : str Name of a samples file to compute ACLs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. min_nsamples : int, optional Require a minimum number of samples to compute an ACL. If the number of samples per walker is less than this, will just set to ``inf``. Default is 10. Returns ------- dict A dictionary of ntemps-long arrays of the ACLs of each parameter. ### Response: def compute_acl(cls, filename, start_index=None, end_index=None, min_nsamples=10): """Computes the autocorrleation length for all model params and temperatures in the given file. Parameter values are averaged over all walkers at each iteration and temperature. The ACL is then calculated over the averaged chain. Parameters ----------- filename : str Name of a samples file to compute ACLs for. start_index : {None, int} The start index to compute the acl from. If None, will try to use the number of burn-in iterations in the file; otherwise, will start at the first sample. end_index : {None, int} The end index to compute the acl to. If None, will go to the end of the current iteration. min_nsamples : int, optional Require a minimum number of samples to compute an ACL. If the number of samples per walker is less than this, will just set to ``inf``. Default is 10. Returns ------- dict A dictionary of ntemps-long arrays of the ACLs of each parameter. 
""" acls = {} with cls._io(filename, 'r') as fp: if end_index is None: end_index = fp.niterations tidx = numpy.arange(fp.ntemps) for param in fp.variable_params: these_acls = numpy.zeros(fp.ntemps) for tk in tidx: samples = fp.read_raw_samples( param, thin_start=start_index, thin_interval=1, thin_end=end_index, temps=tk, flatten=False)[param] # contract the walker dimension using the mean, and flatten # the (length 1) temp dimension samples = samples.mean(axis=1)[0, :] if samples.size < min_nsamples: acl = numpy.inf else: acl = autocorrelation.calculate_acl(samples) if acl <= 0: acl = numpy.inf these_acls[tk] = acl acls[param] = these_acls return acls
def linux(cls, path, argv=None, envp=None, entry_symbol=None, symbolic_files=None, concrete_start='', pure_symbolic=False, stdin_size=None, **kwargs): """ Constructor for Linux binary analysis. :param str path: Path to binary to analyze :param argv: Arguments to provide to the binary :type argv: list[str] :param envp: Environment to provide to the binary :type envp: dict[str, str] :param entry_symbol: Entry symbol to resolve to start execution :type envp: str :param symbolic_files: Filenames to mark as having symbolic input :type symbolic_files: list[str] :param str concrete_start: Concrete stdin to use before symbolic input :param int stdin_size: symbolic stdin size to use :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Linux State :rtype: Manticore """ if stdin_size is None: stdin_size = consts.stdin_size try: return cls(_make_linux(path, argv, envp, entry_symbol, symbolic_files, concrete_start, pure_symbolic, stdin_size), **kwargs) except elftools.common.exceptions.ELFError: raise Exception(f'Invalid binary: {path}')
Constructor for Linux binary analysis. :param str path: Path to binary to analyze :param argv: Arguments to provide to the binary :type argv: list[str] :param envp: Environment to provide to the binary :type envp: dict[str, str] :param entry_symbol: Entry symbol to resolve to start execution :type envp: str :param symbolic_files: Filenames to mark as having symbolic input :type symbolic_files: list[str] :param str concrete_start: Concrete stdin to use before symbolic input :param int stdin_size: symbolic stdin size to use :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Linux State :rtype: Manticore
Below is the the instruction that describes the task: ### Input: Constructor for Linux binary analysis. :param str path: Path to binary to analyze :param argv: Arguments to provide to the binary :type argv: list[str] :param envp: Environment to provide to the binary :type envp: dict[str, str] :param entry_symbol: Entry symbol to resolve to start execution :type envp: str :param symbolic_files: Filenames to mark as having symbolic input :type symbolic_files: list[str] :param str concrete_start: Concrete stdin to use before symbolic input :param int stdin_size: symbolic stdin size to use :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Linux State :rtype: Manticore ### Response: def linux(cls, path, argv=None, envp=None, entry_symbol=None, symbolic_files=None, concrete_start='', pure_symbolic=False, stdin_size=None, **kwargs): """ Constructor for Linux binary analysis. :param str path: Path to binary to analyze :param argv: Arguments to provide to the binary :type argv: list[str] :param envp: Environment to provide to the binary :type envp: dict[str, str] :param entry_symbol: Entry symbol to resolve to start execution :type envp: str :param symbolic_files: Filenames to mark as having symbolic input :type symbolic_files: list[str] :param str concrete_start: Concrete stdin to use before symbolic input :param int stdin_size: symbolic stdin size to use :param kwargs: Forwarded to the Manticore constructor :return: Manticore instance, initialized with a Linux State :rtype: Manticore """ if stdin_size is None: stdin_size = consts.stdin_size try: return cls(_make_linux(path, argv, envp, entry_symbol, symbolic_files, concrete_start, pure_symbolic, stdin_size), **kwargs) except elftools.common.exceptions.ELFError: raise Exception(f'Invalid binary: {path}')
def _check_pid(self, allow_reset=False): """Check process id to ensure integrity, reset if in new process.""" if not self.pid == current_process().pid: if allow_reset: self.reset() else: raise RuntimeError("Forbidden operation in multiple processes")
Check process id to ensure integrity, reset if in new process.
Below is the instruction that describes the task: ### Input: Check process id to ensure integrity, reset if in new process. ### Response: def _check_pid(self, allow_reset=False): """Check process id to ensure integrity, reset if in new process.""" if not self.pid == current_process().pid: if allow_reset: self.reset() else: raise RuntimeError("Forbidden operation in multiple processes")
def for_json(self): """Handle multi-select vs single-select""" if self.multiselect: return super(MultiSelectField, self).for_json() value = self.get_python() if hasattr(value, 'for_json'): return value.for_json() return value
Handle multi-select vs single-select
Below is the instruction that describes the task: ### Input: Handle multi-select vs single-select ### Response: def for_json(self): """Handle multi-select vs single-select""" if self.multiselect: return super(MultiSelectField, self).for_json() value = self.get_python() if hasattr(value, 'for_json'): return value.for_json() return value
def returnData(self, dsptr): """ " get ip data from db file by data start ptr " param: dsptr """ dataPtr = dsptr & 0x00FFFFFFL dataLen = (dsptr >> 24) & 0xFF self.__f.seek(dataPtr) data = self.__f.read(dataLen) result = data[4:].split('|') location = Location(self.getLong(data, 0), result[0], result[1], result[2], result[3], result[4]) return location
" get ip data from db file by data start ptr " param: dsptr
Below is the the instruction that describes the task: ### Input: " get ip data from db file by data start ptr " param: dsptr ### Response: def returnData(self, dsptr): """ " get ip data from db file by data start ptr " param: dsptr """ dataPtr = dsptr & 0x00FFFFFFL dataLen = (dsptr >> 24) & 0xFF self.__f.seek(dataPtr) data = self.__f.read(dataLen) result = data[4:].split('|') location = Location(self.getLong(data, 0), result[0], result[1], result[2], result[3], result[4]) return location
def batch_process( self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1 ): """ Process a large batch of tiles. Parameters ---------- process : MapcheteProcess process to be run zoom : list or int either single zoom level or list of minimum and maximum zoom level; None processes all (default: None) tile : tuple zoom, row and column of tile to be processed (cannot be used with zoom) multi : int number of workers (default: number of CPU cores) max_chunksize : int maximum number of process tiles to be queued for each worker; (default: 1) """ list(self.batch_processor(zoom, tile, multi, max_chunksize))
Process a large batch of tiles. Parameters ---------- process : MapcheteProcess process to be run zoom : list or int either single zoom level or list of minimum and maximum zoom level; None processes all (default: None) tile : tuple zoom, row and column of tile to be processed (cannot be used with zoom) multi : int number of workers (default: number of CPU cores) max_chunksize : int maximum number of process tiles to be queued for each worker; (default: 1)
Below is the the instruction that describes the task: ### Input: Process a large batch of tiles. Parameters ---------- process : MapcheteProcess process to be run zoom : list or int either single zoom level or list of minimum and maximum zoom level; None processes all (default: None) tile : tuple zoom, row and column of tile to be processed (cannot be used with zoom) multi : int number of workers (default: number of CPU cores) max_chunksize : int maximum number of process tiles to be queued for each worker; (default: 1) ### Response: def batch_process( self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1 ): """ Process a large batch of tiles. Parameters ---------- process : MapcheteProcess process to be run zoom : list or int either single zoom level or list of minimum and maximum zoom level; None processes all (default: None) tile : tuple zoom, row and column of tile to be processed (cannot be used with zoom) multi : int number of workers (default: number of CPU cores) max_chunksize : int maximum number of process tiles to be queued for each worker; (default: 1) """ list(self.batch_processor(zoom, tile, multi, max_chunksize))
def parse_widget_name(widget): ''' Parse a alias:block_name string into separate parts. ''' try: alias, block_name = widget.split(':', 1) except ValueError: raise template.TemplateSyntaxError('widget name must be "alias:block_name" - %s' % widget) return alias, block_name
Parse a alias:block_name string into separate parts.
Below is the instruction that describes the task: ### Input: Parse a alias:block_name string into separate parts. ### Response: def parse_widget_name(widget): ''' Parse a alias:block_name string into separate parts. ''' try: alias, block_name = widget.split(':', 1) except ValueError: raise template.TemplateSyntaxError('widget name must be "alias:block_name" - %s' % widget) return alias, block_name
async def put(self, key, value, *args): """Edits a data entry.""" self._db[key] = value await self.save()
Edits a data entry.
Below is the instruction that describes the task: ### Input: Edits a data entry. ### Response: async def put(self, key, value, *args): """Edits a data entry.""" self._db[key] = value await self.save()
def _jq_format(code): """ DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes. """ code = code.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n') code = code.replace('\"', '\\\"').replace('\'', '\\\'') code = code.replace('\v', '\\v').replace('\a', '\\a').replace('\f', '\\f') code = code.replace('\b', '\\b').replace(r'\u', '\\u').replace('\r', '\\r') return code
DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes.
Below is the the instruction that describes the task: ### Input: DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes. ### Response: def _jq_format(code): """ DEPRECATED - Use re.escape() instead, which performs the intended action. Use before throwing raw code such as 'div[tab="advanced"]' into jQuery. Selectors with quotes inside of quotes would otherwise break jQuery. If you just want to escape quotes, there's escape_quotes_if_needed(). This is similar to "json.dumps(value)", but with one less layer of quotes. """ code = code.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n') code = code.replace('\"', '\\\"').replace('\'', '\\\'') code = code.replace('\v', '\\v').replace('\a', '\\a').replace('\f', '\\f') code = code.replace('\b', '\\b').replace(r'\u', '\\u').replace('\r', '\\r') return code
def get_bool(self, key, default=None): """ Same as :meth:`dict.get`, but the value is converted to a bool. The boolean value is considered, respectively, :obj:`True` or :obj:`False` if the string is equal, ignoring case, to ``'true'`` or ``'false'``. """ v = self.get(key, default) if v != default: v = v.strip().lower() if v == 'true': v = True elif v == 'false': v = False elif default is None: raise RuntimeError("invalid bool string: %s" % v) else: v = default return v
Same as :meth:`dict.get`, but the value is converted to a bool. The boolean value is considered, respectively, :obj:`True` or :obj:`False` if the string is equal, ignoring case, to ``'true'`` or ``'false'``.
Below is the the instruction that describes the task: ### Input: Same as :meth:`dict.get`, but the value is converted to a bool. The boolean value is considered, respectively, :obj:`True` or :obj:`False` if the string is equal, ignoring case, to ``'true'`` or ``'false'``. ### Response: def get_bool(self, key, default=None): """ Same as :meth:`dict.get`, but the value is converted to a bool. The boolean value is considered, respectively, :obj:`True` or :obj:`False` if the string is equal, ignoring case, to ``'true'`` or ``'false'``. """ v = self.get(key, default) if v != default: v = v.strip().lower() if v == 'true': v = True elif v == 'false': v = False elif default is None: raise RuntimeError("invalid bool string: %s" % v) else: v = default return v
def _binary(self, node): """ Translate a binary node into latex qtree node. :param node: a treebrd node :return: a qtree subtree rooted at the node """ return '[.${op}$ {left} {right} ]'\ .format(op=latex_operator[node.operator], left=self.translate(node.left), right=self.translate(node.right))
Translate a binary node into latex qtree node. :param node: a treebrd node :return: a qtree subtree rooted at the node
Below is the the instruction that describes the task: ### Input: Translate a binary node into latex qtree node. :param node: a treebrd node :return: a qtree subtree rooted at the node ### Response: def _binary(self, node): """ Translate a binary node into latex qtree node. :param node: a treebrd node :return: a qtree subtree rooted at the node """ return '[.${op}$ {left} {right} ]'\ .format(op=latex_operator[node.operator], left=self.translate(node.left), right=self.translate(node.right))
def _replay_info(replay_path): """Query a replay for information.""" if not replay_path.lower().endswith("sc2replay"): print("Must be a replay.") return run_config = run_configs.get() with run_config.start(want_rgb=False) as controller: info = controller.replay_info(run_config.replay_data(replay_path)) print("-" * 60) print(info)
Query a replay for information.
Below is the instruction that describes the task: ### Input: Query a replay for information. ### Response: def _replay_info(replay_path): """Query a replay for information.""" if not replay_path.lower().endswith("sc2replay"): print("Must be a replay.") return run_config = run_configs.get() with run_config.start(want_rgb=False) as controller: info = controller.replay_info(run_config.replay_data(replay_path)) print("-" * 60) print(info)
def _remote_folder(dirpath, remotes, syn): """Retrieve the remote folder for files, creating if necessary. """ if dirpath in remotes: return remotes[dirpath], remotes else: parent_dir, cur_dir = os.path.split(dirpath) parent_folder, remotes = _remote_folder(parent_dir, remotes, syn) s_cur_dir = syn.store(synapseclient.Folder(cur_dir, parent=parent_folder)) remotes[dirpath] = s_cur_dir.id return s_cur_dir.id, remotes
Retrieve the remote folder for files, creating if necessary.
Below is the instruction that describes the task: ### Input: Retrieve the remote folder for files, creating if necessary. ### Response: def _remote_folder(dirpath, remotes, syn): """Retrieve the remote folder for files, creating if necessary. """ if dirpath in remotes: return remotes[dirpath], remotes else: parent_dir, cur_dir = os.path.split(dirpath) parent_folder, remotes = _remote_folder(parent_dir, remotes, syn) s_cur_dir = syn.store(synapseclient.Folder(cur_dir, parent=parent_folder)) remotes[dirpath] = s_cur_dir.id return s_cur_dir.id, remotes
def delete(self, bucket: str, key: str): """ Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other return value is treated as something was possibly deleted. """ bucket_obj = self._ensure_bucket_loaded(bucket) try: bucket_obj.delete_blob(key) except NotFound: return False
Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other return value is treated as something was possibly deleted.
Below is the the instruction that describes the task: ### Input: Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other return value is treated as something was possibly deleted. ### Response: def delete(self, bucket: str, key: str): """ Deletes an object in a bucket. If the operation definitely did not delete anything, return False. Any other return value is treated as something was possibly deleted. """ bucket_obj = self._ensure_bucket_loaded(bucket) try: bucket_obj.delete_blob(key) except NotFound: return False
def check_auth(args):
    """
    Checks courseraoauth2client's connectivity to the coursera.org API
    servers for a specific application.

    Fetches the caller's basic profile and prints the name and external id
    (unless ``args.quiet`` is set). Exits with status 1 when the API call
    fails or the response cannot be parsed.

    :param args: parsed command-line arguments; must provide ``app`` and
        ``quiet`` attributes.
    """
    oauth2_instance = oauth2.build_oauth2(args.app, args)
    auth = oauth2_instance.build_authorizer()
    my_profile_url = (
        'https://api.coursera.org/api/externalBasicProfiles.v1?'
        'q=me&fields=name'
    )
    r = requests.get(my_profile_url, auth=auth)
    if r.status_code != 200:
        logging.error('Received response code %s from the basic profile API.',
                      r.status_code)
        logging.debug('Response body:\n%s', r.text)
        sys.exit(1)
    # Narrow exception handling: json() raises ValueError on bad bodies,
    # indexing raises LookupError/TypeError on unexpected shapes. A bare
    # except here used to swallow KeyboardInterrupt/SystemExit as well.
    try:
        external_id = r.json()['elements'][0]['id']
    except (ValueError, LookupError, TypeError):
        logging.error(
            'Could not parse the external id out of the response body %s',
            r.text)
        external_id = None
    try:
        name = r.json()['elements'][0]['name']
    except (ValueError, LookupError, TypeError):
        logging.error(
            'Could not parse the name out of the response body %s',
            r.text)
        name = None
    if not args.quiet > 0:
        # print() with a single argument behaves the same on Python 2 and 3,
        # unlike the former Python-2-only print statements.
        print('Name: %s' % name)
        print('External ID: %s' % external_id)
    if name is None or external_id is None:
        sys.exit(1)
Checks courseraoauth2client's connectivity to the coursera.org API servers for a specific application
Below is the the instruction that describes the task: ### Input: Checks courseraoauth2client's connectivity to the coursera.org API servers for a specific application ### Response: def check_auth(args): """ Checks courseraoauth2client's connectivity to the coursera.org API servers for a specific application """ oauth2_instance = oauth2.build_oauth2(args.app, args) auth = oauth2_instance.build_authorizer() my_profile_url = ( 'https://api.coursera.org/api/externalBasicProfiles.v1?' 'q=me&fields=name' ) r = requests.get(my_profile_url, auth=auth) if r.status_code != 200: logging.error('Received response code %s from the basic profile API.', r.status_code) logging.debug('Response body:\n%s', r.text) sys.exit(1) try: external_id = r.json()['elements'][0]['id'] except: logging.error( 'Could not parse the external id out of the response body %s', r.text) external_id = None try: name = r.json()['elements'][0]['name'] except: logging.error( 'Could not parse the name out of the response body %s', r.text) name = None if not args.quiet > 0: print 'Name: %s' % name print 'External ID: %s' % external_id if name is None or external_id is None: sys.exit(1)
def get_ruptures(self, srcfilter=calc.filters.nofilter):
    """
    Rebuild EBRupture objects from the serialized rupture records in
    ``self.rup_array``, reading the surface geometries from the datastore.

    :param srcfilter: a source filter; when it has a truthy
        ``integration_distance`` the close site IDs are computed per record
        and far-away ruptures are skipped (default: no filtering)
    :returns: a list of EBRuptures filtered by bounding box
    """
    ebrs = []
    with datastore.read(self.filename) as dstore:
        # flat structured array of grid points with lon/lat/depth fields
        rupgeoms = dstore['rupgeoms']
        for rec in self.rup_array:
            if srcfilter.integration_distance:
                sids = srcfilter.close_sids(rec, self.trt, rec['mag'])
                if len(sids) == 0:  # the rupture is far away
                    continue
            else:
                sids = None  # no filtering requested
            # mesh holds three (sy, sz) planes: lon, lat, depth
            mesh = numpy.zeros((3, rec['sy'], rec['sz']), F32)
            geom = rupgeoms[rec['gidx1']:rec['gidx2']].reshape(
                rec['sy'], rec['sz'])
            mesh[0] = geom['lon']
            mesh[1] = geom['lat']
            mesh[2] = geom['depth']
            rupture_cls, surface_cls = self.code2cls[rec['code']]
            # bypass __init__ on purpose; all attributes are set explicitly
            rupture = object.__new__(rupture_cls)
            rupture.serial = rec['serial']
            rupture.surface = object.__new__(surface_cls)
            rupture.mag = rec['mag']
            rupture.rake = rec['rake']
            rupture.hypocenter = geo.Point(*rec['hypo'])
            rupture.occurrence_rate = rec['occurrence_rate']
            rupture.tectonic_region_type = self.trt
            if surface_cls is geo.PlanarSurface:
                rupture.surface = geo.PlanarSurface.from_array(
                    mesh[:, 0, :])
            elif surface_cls is geo.MultiSurface:
                # mesh has shape (3, n, 4)
                rupture.surface.__init__([
                    geo.PlanarSurface.from_array(mesh[:, i, :])
                    for i in range(mesh.shape[1])])
            elif surface_cls is geo.GriddedSurface:
                # fault surface, strike and dip will be computed
                rupture.surface.strike = rupture.surface.dip = None
                rupture.surface.mesh = Mesh(*mesh)
            else:
                # fault surface, strike and dip will be computed
                rupture.surface.strike = rupture.surface.dip = None
                rupture.surface.__init__(RectangularMesh(*mesh))
            grp_id = rec['grp_id']
            ebr = EBRupture(rupture, rec['srcidx'], grp_id, rec['n_occ'],
                            self.samples)
            # not implemented: rupture_slip_direction
            ebr.sids = sids
            ebrs.append(ebr)
    return ebrs
:returns: a list of EBRuptures filtered by bounding box
Below is the the instruction that describes the task: ### Input: :returns: a list of EBRuptures filtered by bounding box ### Response: def get_ruptures(self, srcfilter=calc.filters.nofilter): """ :returns: a list of EBRuptures filtered by bounding box """ ebrs = [] with datastore.read(self.filename) as dstore: rupgeoms = dstore['rupgeoms'] for rec in self.rup_array: if srcfilter.integration_distance: sids = srcfilter.close_sids(rec, self.trt, rec['mag']) if len(sids) == 0: # the rupture is far away continue else: sids = None mesh = numpy.zeros((3, rec['sy'], rec['sz']), F32) geom = rupgeoms[rec['gidx1']:rec['gidx2']].reshape( rec['sy'], rec['sz']) mesh[0] = geom['lon'] mesh[1] = geom['lat'] mesh[2] = geom['depth'] rupture_cls, surface_cls = self.code2cls[rec['code']] rupture = object.__new__(rupture_cls) rupture.serial = rec['serial'] rupture.surface = object.__new__(surface_cls) rupture.mag = rec['mag'] rupture.rake = rec['rake'] rupture.hypocenter = geo.Point(*rec['hypo']) rupture.occurrence_rate = rec['occurrence_rate'] rupture.tectonic_region_type = self.trt if surface_cls is geo.PlanarSurface: rupture.surface = geo.PlanarSurface.from_array( mesh[:, 0, :]) elif surface_cls is geo.MultiSurface: # mesh has shape (3, n, 4) rupture.surface.__init__([ geo.PlanarSurface.from_array(mesh[:, i, :]) for i in range(mesh.shape[1])]) elif surface_cls is geo.GriddedSurface: # fault surface, strike and dip will be computed rupture.surface.strike = rupture.surface.dip = None rupture.surface.mesh = Mesh(*mesh) else: # fault surface, strike and dip will be computed rupture.surface.strike = rupture.surface.dip = None rupture.surface.__init__(RectangularMesh(*mesh)) grp_id = rec['grp_id'] ebr = EBRupture(rupture, rec['srcidx'], grp_id, rec['n_occ'], self.samples) # not implemented: rupture_slip_direction ebr.sids = sids ebrs.append(ebr) return ebrs
def define(self, value, lineno, namespace=None):
    """Bind this label to *value* (anything goes — even an AST node).

    Reports an error (without aborting the assignment) when the label was
    already defined; records the defining line number and the namespace,
    falling back to the global NAMESPACE when none is given.
    """
    if self.defined:
        msg = "label '%s' already defined at line %i" % (self.name, self.lineno)
        error(lineno, msg)
    self.value = value
    self.lineno = lineno
    if namespace is None:
        self.namespace = NAMESPACE
    else:
        self.namespace = namespace
Defines label value. It can be anything. Even an AST
Below is the the instruction that describes the task: ### Input: Defines label value. It can be anything. Even an AST ### Response: def define(self, value, lineno, namespace=None): """ Defines label value. It can be anything. Even an AST """ if self.defined: error(lineno, "label '%s' already defined at line %i" % (self.name, self.lineno)) self.value = value self.lineno = lineno self.namespace = NAMESPACE if namespace is None else namespace
def _read_file(name, encoding='utf-8') -> str: """ Read the contents of a file. :param name: The name of the file in the current directory. :param encoding: The encoding of the file; defaults to utf-8. :return: The contents of the file. """ with open(name, encoding=encoding) as f: return f.read()
Read the contents of a file. :param name: The name of the file in the current directory. :param encoding: The encoding of the file; defaults to utf-8. :return: The contents of the file.
Below is the the instruction that describes the task: ### Input: Read the contents of a file. :param name: The name of the file in the current directory. :param encoding: The encoding of the file; defaults to utf-8. :return: The contents of the file. ### Response: def _read_file(name, encoding='utf-8') -> str: """ Read the contents of a file. :param name: The name of the file in the current directory. :param encoding: The encoding of the file; defaults to utf-8. :return: The contents of the file. """ with open(name, encoding=encoding) as f: return f.read()
def _simple_expand(template, seq):
    """
    Expand ``{{...}}`` variables in *template* against a data tree.

    seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE
    seq[-1] IS THE CURRENT CONTEXT

    Each variable may carry ``|``-separated formatter operations; leading
    dots in the variable path walk up the tree. On failure a warning is
    logged and an error marker string is substituted.
    """

    def replacer(found):
        ops = found.group(1).split("|")
        path = ops[0]
        var = path.lstrip(".")
        # number of leading dots selects how far up the tree to look
        depth = min(len(seq), max(1, len(path) - len(var)))
        try:
            val = seq[-depth]
            if var:
                # numeric-looking names index sequences; otherwise key lookup
                if is_sequence(val) and float(var) == _round(float(var), 0):
                    val = val[int(var)]
                else:
                    val = val[var]
            for func_name in ops[1:]:
                parts = func_name.split('(')
                if len(parts) > 1:
                    # NOTE(review): eval on template-provided formatter calls —
                    # only safe with trusted templates
                    val = eval(parts[0] + "(val, " + ("(".join(parts[1::])))
                else:
                    val = FORMATTERS[func_name](val)
            val = toString(val)
            return val
        except Exception as e:
            from mo_logs import Except
            e = Except.wrap(e)
            try:
                # BUG FIX: str.find returns -1 (truthy!) on a miss, so the
                # old `if e.message.find(...)` retried for almost any error.
                # Only retry when the message really mentions serialization.
                if e.message.find("is not JSON serializable") != -1:
                    # WORK HARDER
                    val = toString(val)
                    return val
            except Exception as f:
                if not _Log:
                    _late_import()
                _Log.warning(
                    "Can not expand " + "|".join(ops) + " in template: {{template_|json}}",
                    template_=template,
                    cause=e
                )
            return "[template expansion error: (" + str(e.message) + ")]"

    return _variable_pattern.sub(replacer, template)
seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE seq[-1] IS THE CURRENT CONTEXT
Below is the the instruction that describes the task: ### Input: seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE seq[-1] IS THE CURRENT CONTEXT ### Response: def _simple_expand(template, seq): """ seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE seq[-1] IS THE CURRENT CONTEXT """ def replacer(found): ops = found.group(1).split("|") path = ops[0] var = path.lstrip(".") depth = min(len(seq), max(1, len(path) - len(var))) try: val = seq[-depth] if var: if is_sequence(val) and float(var) == _round(float(var), 0): val = val[int(var)] else: val = val[var] for func_name in ops[1:]: parts = func_name.split('(') if len(parts) > 1: val = eval(parts[0] + "(val, " + ("(".join(parts[1::]))) else: val = FORMATTERS[func_name](val) val = toString(val) return val except Exception as e: from mo_logs import Except e = Except.wrap(e) try: if e.message.find("is not JSON serializable"): # WORK HARDER val = toString(val) return val except Exception as f: if not _Log: _late_import() _Log.warning( "Can not expand " + "|".join(ops) + " in template: {{template_|json}}", template_=template, cause=e ) return "[template expansion error: (" + str(e.message) + ")]" return _variable_pattern.sub(replacer, template)
def create_function_f_y(self):
    """Build the output function ``y(t, x, m, p, c, ng, nu)`` as a ``ca.Function``."""
    arguments = [self.t, self.x, self.m, self.p, self.c, self.ng, self.nu]
    argument_names = ['t', 'x', 'm', 'p', 'c', 'ng', 'nu']
    return ca.Function('y', arguments, [self.y_rhs], argument_names, ['y'],
                       self.func_opt)
output function
Below is the the instruction that describes the task: ### Input: output function ### Response: def create_function_f_y(self): """output function""" return ca.Function( 'y', [self.t, self.x, self.m, self.p, self.c, self.ng, self.nu], [self.y_rhs], ['t', 'x', 'm', 'p', 'c', 'ng', 'nu'], ['y'], self.func_opt)
def _sub8(ins):
    """ Pops last 2 bytes from the stack and subtract them.
    Then push the result onto the stack. Top-1 of the stack is
    subtracted Top

    _sub8 t1, a, b === t1 <-- a - b

    Optimizations:
      * If 2nd op is ZERO, then do NOTHING: A - 0 = A
      * If 1st operand is 0, then just do a NEG
      * If any of the operands is 1, then DEC is used
      * If any of the operands is -1 (255), then INC is used
    """
    # the two source operands of the quad (a, b)
    op1, op2 = tuple(ins.quad[2:])
    if is_int(op2):  # 2nd operand
        op2 = int8(op2)
        # _8bit_oper emits the code that loads the operand (presumably
        # into A — see _8bit_oper)
        output = _8bit_oper(op1)
        if op2 == 0:
            output.append('push af')
            return output  # A - 0 = A

        op2 = int8(op2)
        if op2 == 1:  # A - 1 == DEC A
            output.append('dec a')
            output.append('push af')
            return output

        if op2 == 0xFF:  # A - (-1) == INC A
            output.append('inc a')
            output.append('push af')
            return output

        output.append('sub %i' % op2)
        output.append('push af')
        return output

    if is_int(op1):  # 1st operand is numeric?
        if int8(op1) == 0:  # 0 - A = -A ==> NEG A
            output = _8bit_oper(op2)
            output.append('neg')
            output.append('push af')
            return output

    # At this point, even if 1st operand is numeric, proceed
    # normally

    if op2[0] == '_':  # Optimization when 2nd operand is an id
        # swap operands and tell _8bit_oper they are reversed
        rev = True
        op1, op2 = op2, op1
    else:
        rev = False

    output = _8bit_oper(op1, op2, rev)
    output.append('sub h')  # Z80: A <- A - H
    output.append('push af')
    return output
Pops last 2 bytes from the stack and subtract them. Then push the result onto the stack. Top-1 of the stack is subtracted Top _sub8 t1, a, b === t1 <-- a - b Optimizations: * If 2nd op is ZERO, then do NOTHING: A - 0 = A * If 1st operand is 0, then just do a NEG * If any of the operands is 1, then DEC is used * If any of the operands is -1 (255), then INC is used
Below is the the instruction that describes the task: ### Input: Pops last 2 bytes from the stack and subtract them. Then push the result onto the stack. Top-1 of the stack is subtracted Top _sub8 t1, a, b === t1 <-- a - b Optimizations: * If 2nd op is ZERO, then do NOTHING: A - 0 = A * If 1st operand is 0, then just do a NEG * If any of the operands is 1, then DEC is used * If any of the operands is -1 (255), then INC is used ### Response: def _sub8(ins): """ Pops last 2 bytes from the stack and subtract them. Then push the result onto the stack. Top-1 of the stack is subtracted Top _sub8 t1, a, b === t1 <-- a - b Optimizations: * If 2nd op is ZERO, then do NOTHING: A - 0 = A * If 1st operand is 0, then just do a NEG * If any of the operands is 1, then DEC is used * If any of the operands is -1 (255), then INC is used """ op1, op2 = tuple(ins.quad[2:]) if is_int(op2): # 2nd operand op2 = int8(op2) output = _8bit_oper(op1) if op2 == 0: output.append('push af') return output # A - 0 = A op2 = int8(op2) if op2 == 1: # A - 1 == DEC A output.append('dec a') output.append('push af') return output if op2 == 0xFF: # A - (-1) == INC A output.append('inc a') output.append('push af') return output output.append('sub %i' % op2) output.append('push af') return output if is_int(op1): # 1st operand is numeric? if int8(op1) == 0: # 0 - A = -A ==> NEG A output = _8bit_oper(op2) output.append('neg') output.append('push af') return output # At this point, even if 1st operand is numeric, proceed # normally if op2[0] == '_': # Optimization when 2nd operand is an id rev = True op1, op2 = op2, op1 else: rev = False output = _8bit_oper(op1, op2, rev) output.append('sub h') output.append('push af') return output
def example_2_load_data(self): """ 加载数据 """ # 权重向量, w1代表神经网络的第一层,w2代表神经网络的第二层 self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1)) self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1)) # 特征向量, 区别是,这里不会在计算图中生成节点 #self.x = placeholder(float32, shape=(1, 2), name='input') self.x = placeholder(float32, shape=(3, 2), name='input')
加载数据
Below is the the instruction that describes the task: ### Input: 加载数据 ### Response: def example_2_load_data(self): """ 加载数据 """ # 权重向量, w1代表神经网络的第一层,w2代表神经网络的第二层 self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1)) self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1)) # 特征向量, 区别是,这里不会在计算图中生成节点 #self.x = placeholder(float32, shape=(1, 2), name='input') self.x = placeholder(float32, shape=(3, 2), name='input')
def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0):
    """Fetch this account's ``useridx`` from the registration-info endpoint.

    :param svctype: Information about the platform you are using right now.
    :param auth: Authentication type
    :return: ``(True, metadata)`` on success (``self.useridx`` is updated
        from the response) or ``(False, metadata)`` on failure.
    """
    params = {
        'userid': self.user_id,
        'svctype': svctype,
        'auth': auth,
    }
    ok, metadata = self.GET('getRegisterUserInfo', params)
    if ok is not True:
        return False, metadata
    self.useridx = metadata['useridx']
    return True, metadata
Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed
Below is the the instruction that describes the task: ### Input: Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed ### Response: def getRegisterUserInfo(self, svctype = "Android NDrive App ver", auth = 0): """Retrieve information about useridx :param svctype: Information about the platform you are using right now. :param auth: Authentication type :return: ``True`` when success or ``False`` when failed """ data = {'userid': self.user_id, 'svctype': svctype, 'auth': auth } s, metadata = self.GET('getRegisterUserInfo', data) if s is True: self.useridx = metadata['useridx'] return True, metadata else: return False, metadata
def get_option(self, optionname, default=0):
    """Look up an option value, preferring command-line/global settings.

    Global options ('verify', 'all_logs', 'log_size', 'plugin_timeout') are
    read straight off the parsed command-line options. Otherwise the first
    plugin option named *optionname* whose 'enabled' value is not None wins;
    *default* is returned when nothing matches.
    """
    if optionname in ('verify', 'all_logs', 'log_size', 'plugin_timeout'):
        return getattr(self.commons['cmdlineopts'], optionname)
    for opt_name, opt_parms in zip(self.opt_names, self.opt_parms):
        if opt_name != optionname:
            continue
        enabled = opt_parms['enabled']
        if enabled is not None:
            return enabled
    return default
Returns the first value that matches 'optionname' in parameters passed in via the command line or set via set_option or via the global_plugin_options dictionary, in that order. optionname may be iterable, in which case the first option that matches any of the option names is returned.
Below is the the instruction that describes the task: ### Input: Returns the first value that matches 'optionname' in parameters passed in via the command line or set via set_option or via the global_plugin_options dictionary, in that order. optionaname may be iterable, in which case the first option that matches any of the option names is returned. ### Response: def get_option(self, optionname, default=0): """Returns the first value that matches 'optionname' in parameters passed in via the command line or set via set_option or via the global_plugin_options dictionary, in that order. optionaname may be iterable, in which case the first option that matches any of the option names is returned. """ global_options = ('verify', 'all_logs', 'log_size', 'plugin_timeout') if optionname in global_options: return getattr(self.commons['cmdlineopts'], optionname) for name, parms in zip(self.opt_names, self.opt_parms): if name == optionname: val = parms['enabled'] if val is not None: return val return default
async def parse_request(req):
    """
    Deserialize *req* into a validated Activity.

    :param req: either an Activity instance, or a raw HTTP request carrying
        the serialized activity in a ``body`` attribute or ``'body'`` key.
    :return: the validated Activity
    :raises TypeError: when the activity type is missing/invalid or the
        request carries no body.
    """
    async def validate_activity(activity: Activity):
        # an Activity must at least carry a string `type`
        if not isinstance(activity.type, str):
            raise TypeError('BotFrameworkAdapter.parse_request(): invalid or missing activity type.')
        return True

    if isinstance(req, Activity):
        # Already deserialized — just verify the Activity.type and return it.
        if await validate_activity(req):
            return req
        return None

    # Raw HTTP request: the serialized activity lives either in a `body`
    # attribute or under a 'body' key.
    if hasattr(req, 'body'):
        raw_body = req.body
    elif 'body' in req:
        raw_body = req['body']
    else:
        raise TypeError('BotFrameworkAdapter.parse_request(): received invalid request')
    try:
        activity = Activity().deserialize(raw_body)
        if await validate_activity(activity):
            return activity
    except Exception as e:
        raise e
Parses and validates request :param req: :return:
Below is the the instruction that describes the task: ### Input: Parses and validates request :param req: :return: ### Response: async def parse_request(req): """ Parses and validates request :param req: :return: """ async def validate_activity(activity: Activity): if not isinstance(activity.type, str): raise TypeError('BotFrameworkAdapter.parse_request(): invalid or missing activity type.') return True if not isinstance(req, Activity): # If the req is a raw HTTP Request, try to deserialize it into an Activity and return the Activity. if hasattr(req, 'body'): try: activity = Activity().deserialize(req.body) is_valid_activity = await validate_activity(activity) if is_valid_activity: return activity except Exception as e: raise e elif 'body' in req: try: activity = Activity().deserialize(req['body']) is_valid_activity = await validate_activity(activity) if is_valid_activity: return activity except Exception as e: raise e else: raise TypeError('BotFrameworkAdapter.parse_request(): received invalid request') else: # The `req` has already been deserialized to an Activity, so verify the Activity.type and return it. is_valid_activity = await validate_activity(req) if is_valid_activity: return req
def _rand_sparse(m, n, density, format='csr'): """Construct base function for sprand, sprandn.""" nnz = max(min(int(m*n*density), m*n), 0) row = np.random.randint(low=0, high=m-1, size=nnz) col = np.random.randint(low=0, high=n-1, size=nnz) data = np.ones(nnz, dtype=float) # duplicate (i,j) entries will be summed together return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
Construct base function for sprand, sprandn.
Below is the the instruction that describes the task: ### Input: Construct base function for sprand, sprandn. ### Response: def _rand_sparse(m, n, density, format='csr'): """Construct base function for sprand, sprandn.""" nnz = max(min(int(m*n*density), m*n), 0) row = np.random.randint(low=0, high=m-1, size=nnz) col = np.random.randint(low=0, high=n-1, size=nnz) data = np.ones(nnz, dtype=float) # duplicate (i,j) entries will be summed together return sp.sparse.csr_matrix((data, (row, col)), shape=(m, n))
def login_user(user, remember=False, duration=None, force=False, fresh=True):
    '''
    Logs a user in. You should pass the actual user object to this.

    Returns ``True`` when the log in succeeds and ``False`` when it fails
    (the user is inactive and *force* is not set).

    :param user: The user object to log in.
    :type user: object
    :param remember: Whether to remember the user after their session
        expires. Defaults to ``False``.
    :type remember: bool
    :param duration: The amount of time before the remember cookie expires.
        If ``None`` the value set in the settings is used. Defaults to
        ``None``.
    :type duration: :class:`datetime.timedelta`
    :param force: If the user is inactive, setting this to ``True`` will log
        them in regardless. Defaults to ``False``.
    :type force: bool
    :param fresh: setting this to ``False`` will log in the user with a
        session marked as not "fresh". Defaults to ``True``.
    :type fresh: bool
    '''
    if not force and not user.is_active:
        return False

    user_id = getattr(user, current_app.login_manager.id_attribute)()
    session['user_id'] = user_id
    session['_fresh'] = fresh
    session['_id'] = current_app.login_manager._session_identifier_generator()

    if remember:
        session['remember'] = 'set'
        if duration is not None:
            try:
                # equal to timedelta.total_seconds() but works with Python 2.6
                session['remember_seconds'] = (
                    duration.microseconds +
                    (duration.seconds + duration.days * 24 * 3600) * 10**6
                ) / 10.0**6
            except AttributeError:
                raise Exception('duration must be a datetime.timedelta, '
                                'instead got: {0}'.format(duration))

    current_app.login_manager._update_request_context_with_user(user)
    user_logged_in.send(current_app._get_current_object(), user=_get_user())
    return True
def precision(y_true, y_score, k=None, return_bounds=False):
    """
    Precision of the top-*k* scored examples, ignoring unlabeled ones.

    When *return_bounds* is False, returns precision over the labeled
    examples in the top k. When True, returns a 4-tuple:
      - precision on the labeled examples in the top k
      - number of labeled examples in the top k
      - lower bound of precision, treating all unlabeled examples as False
      - upper bound of precision, treating all unlabeled examples as True
    """
    y_true, y_score = to_float(y_true, y_score)
    top = _argtop(y_score, k)
    top_labels = y_true[top]
    hits = np.nan_to_num(top_labels).sum()      # missing labels count as 0
    n_labeled = (~np.isnan(top_labels)).sum()   # how many are labeled
    prec = hits / n_labeled
    if not return_bounds:
        return prec
    if k is None:
        k = len(y_true)
    if k == 0:
        lower = upper = np.nan
    else:
        lower = hits / k
        upper = (hits + k - n_labeled) / k
    return prec, n_labeled, lower, upper
If return_bounds is False then returns precision on the labeled examples in the top k. If return_bounds is True the returns a tuple containing: - precision on the labeled examples in the top k - number of labeled examples in the top k - lower bound of precision in the top k, assuming all unlabaled examples are False - upper bound of precision in the top k, assuming all unlabaled examples are True
Below is the the instruction that describes the task: ### Input: If return_bounds is False then returns precision on the labeled examples in the top k. If return_bounds is True the returns a tuple containing: - precision on the labeled examples in the top k - number of labeled examples in the top k - lower bound of precision in the top k, assuming all unlabaled examples are False - upper bound of precision in the top k, assuming all unlabaled examples are True ### Response: def precision(y_true, y_score, k=None, return_bounds=False): """ If return_bounds is False then returns precision on the labeled examples in the top k. If return_bounds is True the returns a tuple containing: - precision on the labeled examples in the top k - number of labeled examples in the top k - lower bound of precision in the top k, assuming all unlabaled examples are False - upper bound of precision in the top k, assuming all unlabaled examples are True """ y_true, y_score = to_float(y_true, y_score) top = _argtop(y_score, k) n = np.nan_to_num(y_true[top]).sum() # fill missing labels with 0 d = (~np.isnan(y_true[top])).sum() # count number of labels p = n/d if return_bounds: k = len(y_true) if k is None else k bounds = (n/k, (n+k-d)/k) if k != 0 else (np.nan, np.nan) return p, d, bounds[0], bounds[1] else: return p
def resolve_to_objects_or_project(path, all_matching_results=False):
    '''
    Resolve *path* to one or more existing data objects, or to a project.

    :param path: Path to resolve
    :type path: string
    :param all_matching_results: Whether to return a list of all matching
        results
    :type all_matching_results: boolean

    A thin wrapper over :meth:`resolve_existing_path` which throws an error
    if the path does not look like a project and doesn't match a data object
    path. Returns either a list of results or a single result (depending on
    how many is expected; if only one, then an interactive picking of a
    choice will be initiated if input is a tty, or else throw an error).
    '''
    # Attempt to resolve name
    project, folderpath, entity_results = resolve_existing_path(
        path, expected='entity', allow_mult=True,
        all_mult=all_matching_results)
    if entity_results is None and not is_container_id(path):
        if folderpath not in (None, '/'):
            raise ResolutionError(
                'Could not resolve "' + path +
                '''" to an existing data object or to only a project; if you were attempting to refer to a project by name, please append a colon ":" to indicate that it is a project.''')
    return project, folderpath, entity_results
:param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error).
Below is the the instruction that describes the task: ### Input: :param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). ### Response: def resolve_to_objects_or_project(path, all_matching_results=False): ''' :param path: Path to resolve :type path: string :param all_matching_results: Whether to return a list of all matching results :type all_matching_results: boolean A thin wrapper over :meth:`resolve_existing_path` which throws an error if the path does not look like a project and doesn't match a data object path. Returns either a list of results or a single result (depending on how many is expected; if only one, then an interactive picking of a choice will be initiated if input is a tty, or else throw an error). ''' # Attempt to resolve name project, folderpath, entity_results = resolve_existing_path(path, expected='entity', allow_mult=True, all_mult=all_matching_results) if entity_results is None and not is_container_id(path): if folderpath != None and folderpath != '/': raise ResolutionError('Could not resolve "' + path + \ '''" to an existing data object or to only a project; if you were attempting to refer to a project by name, please append a colon ":" to indicate that it is a project.''') return project, folderpath, entity_results
def connect(self, opt): """This sets up the tokens we expect to see in a way that hvac also expects.""" if not self._kwargs['verify']: LOG.warning('Skipping SSL Validation!') self.version = self.server_version() self.token = self.init_token() my_token = self.lookup_token() if not my_token or 'data' not in my_token: raise aomi.exceptions.AomiCredentials('initial token') display_name = my_token['data']['display_name'] vsn_string = "" if self.version: vsn_string = ", v%s" % self.version else: LOG.warning("Unable to deterine Vault version. Not all " "functionality is supported") LOG.info("Connected to %s as %s%s", self._url, display_name, vsn_string) if opt.reuse_token: LOG.debug("Not creating operational token") self.initial_token = self.token self.operational_token = self.token else: self.initial_token = self.token self.operational_token = self.op_token(display_name, opt) if not self.is_authenticated(): raise aomi.exceptions.AomiCredentials('operational token') self.token = self.operational_token return self
This sets up the tokens we expect to see in a way that hvac also expects.
Below is the the instruction that describes the task: ### Input: This sets up the tokens we expect to see in a way that hvac also expects. ### Response: def connect(self, opt): """This sets up the tokens we expect to see in a way that hvac also expects.""" if not self._kwargs['verify']: LOG.warning('Skipping SSL Validation!') self.version = self.server_version() self.token = self.init_token() my_token = self.lookup_token() if not my_token or 'data' not in my_token: raise aomi.exceptions.AomiCredentials('initial token') display_name = my_token['data']['display_name'] vsn_string = "" if self.version: vsn_string = ", v%s" % self.version else: LOG.warning("Unable to deterine Vault version. Not all " "functionality is supported") LOG.info("Connected to %s as %s%s", self._url, display_name, vsn_string) if opt.reuse_token: LOG.debug("Not creating operational token") self.initial_token = self.token self.operational_token = self.token else: self.initial_token = self.token self.operational_token = self.op_token(display_name, opt) if not self.is_authenticated(): raise aomi.exceptions.AomiCredentials('operational token') self.token = self.operational_token return self
def isFrameRange(frange): """ Return True if the given string is a frame range. Any padding characters, such as '#' and '@' are ignored. Args: frange (str): a frame range to test Returns: bool: """ # we're willing to trim padding characters from consideration # this translation is orders of magnitude faster than prior method frange = str(frange).translate(None, ''.join(PAD_MAP.keys())) if not frange: return True for part in frange.split(','): if not part: continue try: FrameSet._parse_frange_part(part) except ParseException: return False return True
Return True if the given string is a frame range. Any padding characters, such as '#' and '@' are ignored. Args: frange (str): a frame range to test Returns: bool:
Below is the the instruction that describes the task: ### Input: Return True if the given string is a frame range. Any padding characters, such as '#' and '@' are ignored. Args: frange (str): a frame range to test Returns: bool: ### Response: def isFrameRange(frange): """ Return True if the given string is a frame range. Any padding characters, such as '#' and '@' are ignored. Args: frange (str): a frame range to test Returns: bool: """ # we're willing to trim padding characters from consideration # this translation is orders of magnitude faster than prior method frange = str(frange).translate(None, ''.join(PAD_MAP.keys())) if not frange: return True for part in frange.split(','): if not part: continue try: FrameSet._parse_frange_part(part) except ParseException: return False return True
def repo(name, keyid=None, env=None, use_passphrase=False, gnupghome='/etc/salt/gpgkeys', runas='builder', timeout=15.0): ''' Make a package repository and optionally sign it and packages present The name is directory to turn into a repo. This state is best used with onchanges linked to your package building states. name The directory to find packages that will be in the repository keyid .. versionchanged:: 2016.3.0 Optional Key ID to use in signing packages and repository. Utilizes Public and Private keys associated with keyid which have been loaded into the minion's Pillar data. For example, contents from a Pillar data file with named Public and Private keys as follows: .. code-block:: yaml gpg_pkg_priv_key: | -----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1 lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX =JvW8 -----END PGP PRIVATE KEY BLOCK----- gpg_pkg_priv_keyname: gpg_pkg_key.pem gpg_pkg_pub_key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP 4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki inNqW9c= =s1CX -----END PGP PUBLIC KEY BLOCK----- gpg_pkg_pub_keyname: gpg_pkg_key.pub env .. versionchanged:: 2016.3.0 A dictionary of environment variables to be utilized in creating the repository. Example: .. code-block:: yaml - env: OPTIONS: 'ask-passphrase' .. warning:: The above illustrates a common ``PyYAML`` pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. 
More info on this (and other) ``PyYAML`` idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. Use of ``OPTIONS`` on some platforms, for example: ``ask-passphrase``, will require ``gpg-agent`` or similar to cache passphrases. .. note:: This parameter is not used for making ``yum`` repositories. use_passphrase : False .. versionadded:: 2016.3.0 Use a passphrase with the signing key presented in ``keyid``. Passphrase is received from Pillar data which could be passed on the command line with ``pillar`` parameter. For example: .. code-block:: bash pillar='{ "gpg_passphrase" : "my_passphrase" }' gnupghome : /etc/salt/gpgkeys .. versionadded:: 2016.3.0 Location where GPG related files are stored, used with 'keyid' runas : builder .. versionadded:: 2016.3.0 User to create the repository as, and optionally sign packages. .. note:: Ensure the user has correct permissions to any files and directories which are to be utilized. timeout : 15.0 .. versionadded:: 2016.3.4 Timeout in seconds to wait for the prompt for inputting the passphrase. ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if __opts__['test'] is True: ret['result'] = None ret['comment'] = 'Package repo metadata at {0} will be refreshed'.format(name) return ret # Need the check for None here, if env is not provided then it falls back # to None and it is assumed that the environment is not being overridden. if env is not None and not isinstance(env, dict): ret['comment'] = ('Invalidly-formatted \'env\' parameter. 
See ' 'documentation.') return ret func = 'pkgbuild.make_repo' if __grains__.get('os_family', False) not in ('RedHat', 'Suse'): for file in os.listdir(name): if file.endswith('.rpm'): func = 'rpmbuild.make_repo' break res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas, timeout) if res['retcode'] > 0: ret['result'] = False else: ret['changes'] = {'refresh': True} if res['stdout'] and res['stderr']: ret['comment'] = "{0}\n{1}".format(res['stdout'], res['stderr']) elif res['stdout']: ret['comment'] = res['stdout'] elif res['stderr']: ret['comment'] = res['stderr'] return ret
Make a package repository and optionally sign it and packages present The name is directory to turn into a repo. This state is best used with onchanges linked to your package building states. name The directory to find packages that will be in the repository keyid .. versionchanged:: 2016.3.0 Optional Key ID to use in signing packages and repository. Utilizes Public and Private keys associated with keyid which have been loaded into the minion's Pillar data. For example, contents from a Pillar data file with named Public and Private keys as follows: .. code-block:: yaml gpg_pkg_priv_key: | -----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1 lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX =JvW8 -----END PGP PRIVATE KEY BLOCK----- gpg_pkg_priv_keyname: gpg_pkg_key.pem gpg_pkg_pub_key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP 4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki inNqW9c= =s1CX -----END PGP PUBLIC KEY BLOCK----- gpg_pkg_pub_keyname: gpg_pkg_key.pub env .. versionchanged:: 2016.3.0 A dictionary of environment variables to be utilized in creating the repository. Example: .. code-block:: yaml - env: OPTIONS: 'ask-passphrase' .. warning:: The above illustrates a common ``PyYAML`` pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) ``PyYAML`` idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. 
Use of ``OPTIONS`` on some platforms, for example: ``ask-passphrase``, will require ``gpg-agent`` or similar to cache passphrases. .. note:: This parameter is not used for making ``yum`` repositories. use_passphrase : False .. versionadded:: 2016.3.0 Use a passphrase with the signing key presented in ``keyid``. Passphrase is received from Pillar data which could be passed on the command line with ``pillar`` parameter. For example: .. code-block:: bash pillar='{ "gpg_passphrase" : "my_passphrase" }' gnupghome : /etc/salt/gpgkeys .. versionadded:: 2016.3.0 Location where GPG related files are stored, used with 'keyid' runas : builder .. versionadded:: 2016.3.0 User to create the repository as, and optionally sign packages. .. note:: Ensure the user has correct permissions to any files and directories which are to be utilized. timeout : 15.0 .. versionadded:: 2016.3.4 Timeout in seconds to wait for the prompt for inputting the passphrase.
Below is the the instruction that describes the task: ### Input: Make a package repository and optionally sign it and packages present The name is directory to turn into a repo. This state is best used with onchanges linked to your package building states. name The directory to find packages that will be in the repository keyid .. versionchanged:: 2016.3.0 Optional Key ID to use in signing packages and repository. Utilizes Public and Private keys associated with keyid which have been loaded into the minion's Pillar data. For example, contents from a Pillar data file with named Public and Private keys as follows: .. code-block:: yaml gpg_pkg_priv_key: | -----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1 lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX =JvW8 -----END PGP PRIVATE KEY BLOCK----- gpg_pkg_priv_keyname: gpg_pkg_key.pem gpg_pkg_pub_key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP 4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki inNqW9c= =s1CX -----END PGP PUBLIC KEY BLOCK----- gpg_pkg_pub_keyname: gpg_pkg_key.pub env .. versionchanged:: 2016.3.0 A dictionary of environment variables to be utilized in creating the repository. Example: .. code-block:: yaml - env: OPTIONS: 'ask-passphrase' .. warning:: The above illustrates a common ``PyYAML`` pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) ``PyYAML`` idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. 
Use of ``OPTIONS`` on some platforms, for example: ``ask-passphrase``, will require ``gpg-agent`` or similar to cache passphrases. .. note:: This parameter is not used for making ``yum`` repositories. use_passphrase : False .. versionadded:: 2016.3.0 Use a passphrase with the signing key presented in ``keyid``. Passphrase is received from Pillar data which could be passed on the command line with ``pillar`` parameter. For example: .. code-block:: bash pillar='{ "gpg_passphrase" : "my_passphrase" }' gnupghome : /etc/salt/gpgkeys .. versionadded:: 2016.3.0 Location where GPG related files are stored, used with 'keyid' runas : builder .. versionadded:: 2016.3.0 User to create the repository as, and optionally sign packages. .. note:: Ensure the user has correct permissions to any files and directories which are to be utilized. timeout : 15.0 .. versionadded:: 2016.3.4 Timeout in seconds to wait for the prompt for inputting the passphrase. ### Response: def repo(name, keyid=None, env=None, use_passphrase=False, gnupghome='/etc/salt/gpgkeys', runas='builder', timeout=15.0): ''' Make a package repository and optionally sign it and packages present The name is directory to turn into a repo. This state is best used with onchanges linked to your package building states. name The directory to find packages that will be in the repository keyid .. versionchanged:: 2016.3.0 Optional Key ID to use in signing packages and repository. Utilizes Public and Private keys associated with keyid which have been loaded into the minion's Pillar data. For example, contents from a Pillar data file with named Public and Private keys as follows: .. code-block:: yaml gpg_pkg_priv_key: | -----BEGIN PGP PRIVATE KEY BLOCK----- Version: GnuPG v1 lQO+BFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . 
Ebe+8JCQTwqSXPRTzXmy/b5WXDeM79CkLWvuGpXFor76D+ECMRPv/rawukEcNptn R5OmgHqvydEnO4pWbn8JzQO9YX/Us0SMHBVzLC8eIi5ZIopzalvX =JvW8 -----END PGP PRIVATE KEY BLOCK----- gpg_pkg_priv_keyname: gpg_pkg_key.pem gpg_pkg_pub_key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mQENBFciIfQBCADAPCtzx7I5Rl32escCMZsPzaEKWe7bIX1em4KCKkBoX47IG54b w82PCE8Y1jF/9Uk2m3RKVWp3YcLlc7Ap3gj6VO4ysvVz28UbnhPxsIkOlf2cq8qc . . bYP7t5iwJmQzRMyFInYRt77wkJBPCpJc9FPNebL9vlZcN4zv0KQta+4alcWivvoP 4QIxE+/+trC6QRw2m2dHk6aAeq/J0Sc7ilZufwnNA71hf9SzRIwcFXMsLx4iLlki inNqW9c= =s1CX -----END PGP PUBLIC KEY BLOCK----- gpg_pkg_pub_keyname: gpg_pkg_key.pub env .. versionchanged:: 2016.3.0 A dictionary of environment variables to be utilized in creating the repository. Example: .. code-block:: yaml - env: OPTIONS: 'ask-passphrase' .. warning:: The above illustrates a common ``PyYAML`` pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) ``PyYAML`` idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. Use of ``OPTIONS`` on some platforms, for example: ``ask-passphrase``, will require ``gpg-agent`` or similar to cache passphrases. .. note:: This parameter is not used for making ``yum`` repositories. use_passphrase : False .. versionadded:: 2016.3.0 Use a passphrase with the signing key presented in ``keyid``. Passphrase is received from Pillar data which could be passed on the command line with ``pillar`` parameter. For example: .. code-block:: bash pillar='{ "gpg_passphrase" : "my_passphrase" }' gnupghome : /etc/salt/gpgkeys .. versionadded:: 2016.3.0 Location where GPG related files are stored, used with 'keyid' runas : builder .. versionadded:: 2016.3.0 User to create the repository as, and optionally sign packages. .. note:: Ensure the user has correct permissions to any files and directories which are to be utilized. timeout : 15.0 .. 
versionadded:: 2016.3.4 Timeout in seconds to wait for the prompt for inputting the passphrase. ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if __opts__['test'] is True: ret['result'] = None ret['comment'] = 'Package repo metadata at {0} will be refreshed'.format(name) return ret # Need the check for None here, if env is not provided then it falls back # to None and it is assumed that the environment is not being overridden. if env is not None and not isinstance(env, dict): ret['comment'] = ('Invalidly-formatted \'env\' parameter. See ' 'documentation.') return ret func = 'pkgbuild.make_repo' if __grains__.get('os_family', False) not in ('RedHat', 'Suse'): for file in os.listdir(name): if file.endswith('.rpm'): func = 'rpmbuild.make_repo' break res = __salt__[func](name, keyid, env, use_passphrase, gnupghome, runas, timeout) if res['retcode'] > 0: ret['result'] = False else: ret['changes'] = {'refresh': True} if res['stdout'] and res['stderr']: ret['comment'] = "{0}\n{1}".format(res['stdout'], res['stderr']) elif res['stdout']: ret['comment'] = res['stdout'] elif res['stderr']: ret['comment'] = res['stderr'] return ret
def add_gt_proposals(self, proposals, targets): """ Arguments: proposals: list[BoxList] targets: list[BoxList] """ # Get the device we're operating on device = proposals[0].bbox.device gt_boxes = [target.copy_with_fields([]) for target in targets] # later cat of bbox requires all fields to be present for all bbox # so we need to add a dummy for objectness that's missing for gt_box in gt_boxes: gt_box.add_field("objectness", torch.ones(len(gt_box), device=device)) proposals = [ cat_boxlist((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes) ] return proposals
Arguments: proposals: list[BoxList] targets: list[BoxList]
Below is the the instruction that describes the task: ### Input: Arguments: proposals: list[BoxList] targets: list[BoxList] ### Response: def add_gt_proposals(self, proposals, targets): """ Arguments: proposals: list[BoxList] targets: list[BoxList] """ # Get the device we're operating on device = proposals[0].bbox.device gt_boxes = [target.copy_with_fields([]) for target in targets] # later cat of bbox requires all fields to be present for all bbox # so we need to add a dummy for objectness that's missing for gt_box in gt_boxes: gt_box.add_field("objectness", torch.ones(len(gt_box), device=device)) proposals = [ cat_boxlist((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes) ] return proposals
def gfoclt(occtyp, front, fshape, fframe, back, bshape, bframe, abcorr, obsrvr, step, cnfine, result=None): """ Determine time intervals when an observer sees one target occulted by, or in transit across, another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfoclt_c.html :param occtyp: Type of occultation. :type occtyp: str :param front: Name of body occulting the other. :type front: str :param fshape: Type of shape model used for front body. :type fshape: str :param fframe: Body-fixed, body-centered frame for front body. :type fframe: str :param back: Name of body occulted by the other. :type back: str :param bshape: Type of shape model used for back body. :type bshape: str :param bframe: Body-fixed, body-centered frame for back body. :type bframe: str :param abcorr: Aberration correction flag. :type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param step: Step size in seconds for finding occultation events. :type step: float :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell """ assert isinstance(cnfine, stypes.SpiceCell) assert cnfine.is_double() if result is None: result = stypes.SPICEDOUBLE_CELL(2000) else: assert isinstance(result, stypes.SpiceCell) assert result.is_double() occtyp = stypes.stringToCharP(occtyp) front = stypes.stringToCharP(front) fshape = stypes.stringToCharP(fshape) fframe = stypes.stringToCharP(fframe) back = stypes.stringToCharP(back) bshape = stypes.stringToCharP(bshape) bframe = stypes.stringToCharP(bframe) abcorr = stypes.stringToCharP(abcorr) obsrvr = stypes.stringToCharP(obsrvr) step = ctypes.c_double(step) libspice.gfoclt_c(occtyp, front, fshape, fframe, back, bshape, bframe, abcorr, obsrvr, step, ctypes.byref(cnfine), ctypes.byref(result)) return result
Determine time intervals when an observer sees one target occulted by, or in transit across, another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfoclt_c.html :param occtyp: Type of occultation. :type occtyp: str :param front: Name of body occulting the other. :type front: str :param fshape: Type of shape model used for front body. :type fshape: str :param fframe: Body-fixed, body-centered frame for front body. :type fframe: str :param back: Name of body occulted by the other. :type back: str :param bshape: Type of shape model used for back body. :type bshape: str :param bframe: Body-fixed, body-centered frame for back body. :type bframe: str :param abcorr: Aberration correction flag. :type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param step: Step size in seconds for finding occultation events. :type step: float :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell
Below is the the instruction that describes the task: ### Input: Determine time intervals when an observer sees one target occulted by, or in transit across, another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfoclt_c.html :param occtyp: Type of occultation. :type occtyp: str :param front: Name of body occulting the other. :type front: str :param fshape: Type of shape model used for front body. :type fshape: str :param fframe: Body-fixed, body-centered frame for front body. :type fframe: str :param back: Name of body occulted by the other. :type back: str :param bshape: Type of shape model used for back body. :type bshape: str :param bframe: Body-fixed, body-centered frame for back body. :type bframe: str :param abcorr: Aberration correction flag. :type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param step: Step size in seconds for finding occultation events. :type step: float :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell ### Response: def gfoclt(occtyp, front, fshape, fframe, back, bshape, bframe, abcorr, obsrvr, step, cnfine, result=None): """ Determine time intervals when an observer sees one target occulted by, or in transit across, another. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/gfoclt_c.html :param occtyp: Type of occultation. :type occtyp: str :param front: Name of body occulting the other. :type front: str :param fshape: Type of shape model used for front body. :type fshape: str :param fframe: Body-fixed, body-centered frame for front body. :type fframe: str :param back: Name of body occulted by the other. :type back: str :param bshape: Type of shape model used for back body. :type bshape: str :param bframe: Body-fixed, body-centered frame for back body. :type bframe: str :param abcorr: Aberration correction flag. 
:type abcorr: str :param obsrvr: Name of the observing body. :type obsrvr: str :param step: Step size in seconds for finding occultation events. :type step: float :param cnfine: SPICE window to which the search is restricted. :type cnfine: spiceypy.utils.support_types.SpiceCell :param result: Optional SPICE window containing results. :type result: spiceypy.utils.support_types.SpiceCell """ assert isinstance(cnfine, stypes.SpiceCell) assert cnfine.is_double() if result is None: result = stypes.SPICEDOUBLE_CELL(2000) else: assert isinstance(result, stypes.SpiceCell) assert result.is_double() occtyp = stypes.stringToCharP(occtyp) front = stypes.stringToCharP(front) fshape = stypes.stringToCharP(fshape) fframe = stypes.stringToCharP(fframe) back = stypes.stringToCharP(back) bshape = stypes.stringToCharP(bshape) bframe = stypes.stringToCharP(bframe) abcorr = stypes.stringToCharP(abcorr) obsrvr = stypes.stringToCharP(obsrvr) step = ctypes.c_double(step) libspice.gfoclt_c(occtyp, front, fshape, fframe, back, bshape, bframe, abcorr, obsrvr, step, ctypes.byref(cnfine), ctypes.byref(result)) return result
def tag(version, params): """Build and return full command to use with subprocess.Popen for 'git tag' command :param version: :param params: :return: list """ cmd = ['git', 'tag', '-a', '-m', 'v%s' % version, str(version)] if params: cmd.extend(params) return cmd
Build and return full command to use with subprocess.Popen for 'git tag' command :param version: :param params: :return: list
Below is the instruction that describes the task: ### Input: Build and return full command to use with subprocess.Popen for 'git tag' command :param version: :param params: :return: list ### Response: def tag(version, params): """Build and return full command to use with subprocess.Popen for 'git tag' command :param version: :param params: :return: list """ cmd = ['git', 'tag', '-a', '-m', 'v%s' % version, str(version)] if params: cmd.extend(params) return cmd
def make_relative_to_root(path): """Update options so that defaults are user relative to specified pex_root.""" return os.path.normpath(path.format(pex_root=ENV.PEX_ROOT))
Update options so that defaults are user relative to specified pex_root.
Below is the instruction that describes the task: ### Input: Update options so that defaults are user relative to specified pex_root. ### Response: def make_relative_to_root(path): """Update options so that defaults are user relative to specified pex_root.""" return os.path.normpath(path.format(pex_root=ENV.PEX_ROOT))
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH): """The length of an elliptical large_arc segment requires numerical integration, and in that case it's simpler to just do a geometric approximation, as for cubic bezier curves.""" assert 0 <= t0 <= 1 and 0 <= t1 <= 1 if _quad_available: return quad(lambda tau: abs(self.derivative(tau)), t0, t1, epsabs=error, limit=1000)[0] else: return segment_length(self, t0, t1, self.point(t0), self.point(t1), error, min_depth, 0)
The length of an elliptical large_arc segment requires numerical integration, and in that case it's simpler to just do a geometric approximation, as for cubic bezier curves.
Below is the instruction that describes the task: ### Input: The length of an elliptical large_arc segment requires numerical integration, and in that case it's simpler to just do a geometric approximation, as for cubic bezier curves. ### Response: def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH): """The length of an elliptical large_arc segment requires numerical integration, and in that case it's simpler to just do a geometric approximation, as for cubic bezier curves.""" assert 0 <= t0 <= 1 and 0 <= t1 <= 1 if _quad_available: return quad(lambda tau: abs(self.derivative(tau)), t0, t1, epsabs=error, limit=1000)[0] else: return segment_length(self, t0, t1, self.point(t0), self.point(t1), error, min_depth, 0)
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame: """Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate. """ series = table[self.name] self._check_series_name(series) validators = self.validators results = pd.DataFrame({validator: series for validator in validators}, index=series.index) for name, func in validators.items(): results[name] = func(results[name]) results['dtype'] = self._validate_series_dtype(series) if self.unique: results['unique'] = v.funcs.unique(series) if failed_only: results = find_failed_rows(results) return results
Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate.
Below is the the instruction that describes the task: ### Input: Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate. ### Response: def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame: """Return a dataframe of validation results for the appropriate series vs the vector of validators. Args: table (pd.DataFrame): A dataframe on which to apply validation logic. failed_only (bool): If ``True``: return only the indexes that failed to validate. """ series = table[self.name] self._check_series_name(series) validators = self.validators results = pd.DataFrame({validator: series for validator in validators}, index=series.index) for name, func in validators.items(): results[name] = func(results[name]) results['dtype'] = self._validate_series_dtype(series) if self.unique: results['unique'] = v.funcs.unique(series) if failed_only: results = find_failed_rows(results) return results
def read_block_idb(self, block, _): """Interface Description Block""" options = block[16:] tsresol = 1000000 while len(options) >= 4: code, length = struct.unpack(self.endian + "HH", options[:4]) # PCAP Next Generation (pcapng) Capture File Format # 4.2. - Interface Description Block # http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2 if code == 9 and length == 1 and len(options) >= 5: tsresol = orb(options[4]) tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127) if code == 0: if length != 0: warning("PcapNg: invalid option length %d for end-of-option" % length) # noqa: E501 break if length % 4: length += (4 - (length % 4)) options = options[4 + length:] self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8]) + (tsresol,))
Interface Description Block
Below is the the instruction that describes the task: ### Input: Interface Description Block ### Response: def read_block_idb(self, block, _): """Interface Description Block""" options = block[16:] tsresol = 1000000 while len(options) >= 4: code, length = struct.unpack(self.endian + "HH", options[:4]) # PCAP Next Generation (pcapng) Capture File Format # 4.2. - Interface Description Block # http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2 if code == 9 and length == 1 and len(options) >= 5: tsresol = orb(options[4]) tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127) if code == 0: if length != 0: warning("PcapNg: invalid option length %d for end-of-option" % length) # noqa: E501 break if length % 4: length += (4 - (length % 4)) options = options[4 + length:] self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8]) + (tsresol,))
def _set_vni_mask(self, v, load=False): """ Setter method for vni_mask, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_vni_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_mask() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'0|[1-9a-fA-F][0-9a-fA-F]{0,5}'}), is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni mask:Hexadecimal 0..FFFFFF', u'display-when': u'not(../vni-any)', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vni_mask must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'0|[1-9a-fA-F][0-9a-fA-F]{0,5}'}), is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni mask:Hexadecimal 0..FFFFFF', u'display-when': u'not(../vni-any)', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='string', is_config=True)""", }) self.__vni_mask = t if hasattr(self, '_set'): self._set()
Setter method for vni_mask, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_vni_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_mask() directly.
Below is the the instruction that describes the task: ### Input: Setter method for vni_mask, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_vni_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_mask() directly. ### Response: def _set_vni_mask(self, v, load=False): """ Setter method for vni_mask, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/vni_mask (string) If this variable is read-only (config: false) in the source YANG file, then _set_vni_mask is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vni_mask() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'0|[1-9a-fA-F][0-9a-fA-F]{0,5}'}), is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni mask:Hexadecimal 0..FFFFFF', u'display-when': u'not(../vni-any)', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='string', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vni_mask must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'0|[1-9a-fA-F][0-9a-fA-F]{0,5}'}), is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'vni mask:Hexadecimal 0..FFFFFF', u'display-when': u'not(../vni-any)', u'cli-suppress-no': 
None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='string', is_config=True)""", }) self.__vni_mask = t if hasattr(self, '_set'): self._set()
def encode(self, pdu): """Encode a tag on the end of the PDU.""" # check for special encoding if (self.tagClass == Tag.contextTagClass): data = 0x08 elif (self.tagClass == Tag.openingTagClass): data = 0x0E elif (self.tagClass == Tag.closingTagClass): data = 0x0F else: data = 0x00 # encode the tag number part if (self.tagNumber < 15): data += (self.tagNumber << 4) else: data += 0xF0 # encode the length/value/type part if (self.tagLVT < 5): data += self.tagLVT else: data += 0x05 # save this and the extended tag value pdu.put( data ) if (self.tagNumber >= 15): pdu.put(self.tagNumber) # really short lengths are already done if (self.tagLVT >= 5): if (self.tagLVT <= 253): pdu.put( self.tagLVT ) elif (self.tagLVT <= 65535): pdu.put( 254 ) pdu.put_short( self.tagLVT ) else: pdu.put( 255 ) pdu.put_long( self.tagLVT ) # now put the data pdu.put_data(self.tagData)
Encode a tag on the end of the PDU.
Below is the the instruction that describes the task: ### Input: Encode a tag on the end of the PDU. ### Response: def encode(self, pdu): """Encode a tag on the end of the PDU.""" # check for special encoding if (self.tagClass == Tag.contextTagClass): data = 0x08 elif (self.tagClass == Tag.openingTagClass): data = 0x0E elif (self.tagClass == Tag.closingTagClass): data = 0x0F else: data = 0x00 # encode the tag number part if (self.tagNumber < 15): data += (self.tagNumber << 4) else: data += 0xF0 # encode the length/value/type part if (self.tagLVT < 5): data += self.tagLVT else: data += 0x05 # save this and the extended tag value pdu.put( data ) if (self.tagNumber >= 15): pdu.put(self.tagNumber) # really short lengths are already done if (self.tagLVT >= 5): if (self.tagLVT <= 253): pdu.put( self.tagLVT ) elif (self.tagLVT <= 65535): pdu.put( 254 ) pdu.put_short( self.tagLVT ) else: pdu.put( 255 ) pdu.put_long( self.tagLVT ) # now put the data pdu.put_data(self.tagData)
def create_model(samples_x, samples_y_aggregation, n_restarts_optimizer=250, is_white_kernel=False): ''' Trains GP regression model ''' kernel = gp.kernels.ConstantKernel(constant_value=1, constant_value_bounds=(1e-12, 1e12)) * \ gp.kernels.Matern(nu=1.5) if is_white_kernel is True: kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12)) regressor = gp.GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, normalize_y=True, alpha=1e-10) regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation)) model = {} model['model'] = regressor model['kernel_prior'] = str(kernel) model['kernel_posterior'] = str(regressor.kernel_) model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta) return model
Trains GP regression model
Below is the the instruction that describes the task: ### Input: Trains GP regression model ### Response: def create_model(samples_x, samples_y_aggregation, n_restarts_optimizer=250, is_white_kernel=False): ''' Trains GP regression model ''' kernel = gp.kernels.ConstantKernel(constant_value=1, constant_value_bounds=(1e-12, 1e12)) * \ gp.kernels.Matern(nu=1.5) if is_white_kernel is True: kernel += gp.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-12, 1e12)) regressor = gp.GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, normalize_y=True, alpha=1e-10) regressor.fit(numpy.array(samples_x), numpy.array(samples_y_aggregation)) model = {} model['model'] = regressor model['kernel_prior'] = str(kernel) model['kernel_posterior'] = str(regressor.kernel_) model['model_loglikelihood'] = regressor.log_marginal_likelihood(regressor.kernel_.theta) return model
def _add_slide_footer(self, slide_no): """Add the slide footer to the output if enabled.""" if self.builder.config.slide_footer: self.body.append( '\n<div class="slide-footer">%s</div>\n' % ( self.builder.config.slide_footer, ), )
Add the slide footer to the output if enabled.
Below is the the instruction that describes the task: ### Input: Add the slide footer to the output if enabled. ### Response: def _add_slide_footer(self, slide_no): """Add the slide footer to the output if enabled.""" if self.builder.config.slide_footer: self.body.append( '\n<div class="slide-footer">%s</div>\n' % ( self.builder.config.slide_footer, ), )
def p_variable_declaration_list(self, p): """ variable_declaration_list \ : variable_declaration | variable_declaration_list COMMA variable_declaration """ if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1]
variable_declaration_list \ : variable_declaration | variable_declaration_list COMMA variable_declaration
Below is the the instruction that describes the task: ### Input: variable_declaration_list \ : variable_declaration | variable_declaration_list COMMA variable_declaration ### Response: def p_variable_declaration_list(self, p): """ variable_declaration_list \ : variable_declaration | variable_declaration_list COMMA variable_declaration """ if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[3]) p[0] = p[1]
def main(): """Command line interface for the ``coloredlogs`` program.""" actions = [] try: # Parse the command line arguments. options, arguments = getopt.getopt(sys.argv[1:], 'cdh', [ 'convert', 'to-html', 'demo', 'help', ]) # Map command line options to actions. for option, value in options: if option in ('-c', '--convert', '--to-html'): actions.append(functools.partial(convert_command_output, *arguments)) arguments = [] elif option in ('-d', '--demo'): actions.append(demonstrate_colored_logging) elif option in ('-h', '--help'): usage(__doc__) return else: assert False, "Programming error: Unhandled option!" if not actions: usage(__doc__) return except Exception as e: warning("Error: %s", e) sys.exit(1) for function in actions: function()
Command line interface for the ``coloredlogs`` program.
Below is the the instruction that describes the task: ### Input: Command line interface for the ``coloredlogs`` program. ### Response: def main(): """Command line interface for the ``coloredlogs`` program.""" actions = [] try: # Parse the command line arguments. options, arguments = getopt.getopt(sys.argv[1:], 'cdh', [ 'convert', 'to-html', 'demo', 'help', ]) # Map command line options to actions. for option, value in options: if option in ('-c', '--convert', '--to-html'): actions.append(functools.partial(convert_command_output, *arguments)) arguments = [] elif option in ('-d', '--demo'): actions.append(demonstrate_colored_logging) elif option in ('-h', '--help'): usage(__doc__) return else: assert False, "Programming error: Unhandled option!" if not actions: usage(__doc__) return except Exception as e: warning("Error: %s", e) sys.exit(1) for function in actions: function()
def register_view(self): """ Display registration form and create new User.""" safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_LOGIN_ENDPOINT) safe_reg_next_url = self._get_safe_next_url('reg_next', self.USER_AFTER_REGISTER_ENDPOINT) # Initialize form login_form = self.LoginFormClass() # for login_or_register.html register_form = self.RegisterFormClass(request.form) # for register.html # invite token used to determine validity of registeree invite_token = request.values.get("token") # require invite without a token should disallow the user from registering if self.USER_REQUIRE_INVITATION and not invite_token: flash("Registration is invite only", "error") return redirect(url_for('user.login')) user_invitation = None if invite_token and self.db_manager.UserInvitationClass: data_items = self.token_manager.verify_token(invite_token, self.USER_INVITE_EXPIRATION) if data_items: user_invitation_id = data_items[0] user_invitation = self.db_manager.get_user_invitation_by_id(user_invitation_id) if not user_invitation: flash("Invalid invitation token", "error") return redirect(url_for('user.login')) register_form.invite_token.data = invite_token if request.method != 'POST': login_form.next.data = register_form.next.data = safe_next_url login_form.reg_next.data = register_form.reg_next.data = safe_reg_next_url if user_invitation: register_form.email.data = user_invitation.email # Process valid POST if request.method == 'POST' and register_form.validate(): user = self.db_manager.add_user() register_form.populate_obj(user) user_email = self.db_manager.add_user_email(user=user, is_primary=True) register_form.populate_obj(user_email) # Store password hash instead of password user.password = self.hash_password(user.password) # Email confirmation depends on the USER_ENABLE_CONFIRM_EMAIL setting request_email_confirmation = self.USER_ENABLE_CONFIRM_EMAIL # Users that register through an invitation, can skip this process # but only when they register with an email that 
matches their invitation. if user_invitation: if user_invitation.email.lower() == register_form.email.data.lower(): user_email.email_confirmed_at=datetime.utcnow() request_email_confirmation = False self.db_manager.save_user_and_user_email(user, user_email) self.db_manager.commit() # Send 'registered' email and delete new User object if send fails if self.USER_SEND_REGISTERED_EMAIL: try: # Send 'confirm email' or 'registered' email self._send_registered_email(user, user_email, request_email_confirmation) except Exception as e: # delete new User object if send fails self.db_manager.delete_object(user) self.db_manager.commit() raise # Send user_registered signal signals.user_registered.send(current_app._get_current_object(), user=user, user_invitation=user_invitation) # Redirect if USER_ENABLE_CONFIRM_EMAIL is set if self.USER_ENABLE_CONFIRM_EMAIL and request_email_confirmation: safe_reg_next_url = self.make_safe_url(register_form.reg_next.data) return redirect(safe_reg_next_url) # Auto-login after register or redirect to login page if 'reg_next' in request.args: safe_reg_next_url = self.make_safe_url(register_form.reg_next.data) else: safe_reg_next_url = self._endpoint_url(self.USER_AFTER_CONFIRM_ENDPOINT) if self.USER_AUTO_LOGIN_AFTER_REGISTER: return self._do_login_user(user, safe_reg_next_url) # auto-login else: return redirect(url_for('user.login') + '?next=' + quote(safe_reg_next_url)) # redirect to login page # Render form self.prepare_domain_translations() return render_template(self.USER_REGISTER_TEMPLATE, form=register_form, login_form=login_form, register_form=register_form)
Display registration form and create new User.
Below is the the instruction that describes the task: ### Input: Display registration form and create new User. ### Response: def register_view(self): """ Display registration form and create new User.""" safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_LOGIN_ENDPOINT) safe_reg_next_url = self._get_safe_next_url('reg_next', self.USER_AFTER_REGISTER_ENDPOINT) # Initialize form login_form = self.LoginFormClass() # for login_or_register.html register_form = self.RegisterFormClass(request.form) # for register.html # invite token used to determine validity of registeree invite_token = request.values.get("token") # require invite without a token should disallow the user from registering if self.USER_REQUIRE_INVITATION and not invite_token: flash("Registration is invite only", "error") return redirect(url_for('user.login')) user_invitation = None if invite_token and self.db_manager.UserInvitationClass: data_items = self.token_manager.verify_token(invite_token, self.USER_INVITE_EXPIRATION) if data_items: user_invitation_id = data_items[0] user_invitation = self.db_manager.get_user_invitation_by_id(user_invitation_id) if not user_invitation: flash("Invalid invitation token", "error") return redirect(url_for('user.login')) register_form.invite_token.data = invite_token if request.method != 'POST': login_form.next.data = register_form.next.data = safe_next_url login_form.reg_next.data = register_form.reg_next.data = safe_reg_next_url if user_invitation: register_form.email.data = user_invitation.email # Process valid POST if request.method == 'POST' and register_form.validate(): user = self.db_manager.add_user() register_form.populate_obj(user) user_email = self.db_manager.add_user_email(user=user, is_primary=True) register_form.populate_obj(user_email) # Store password hash instead of password user.password = self.hash_password(user.password) # Email confirmation depends on the USER_ENABLE_CONFIRM_EMAIL setting request_email_confirmation = 
self.USER_ENABLE_CONFIRM_EMAIL # Users that register through an invitation, can skip this process # but only when they register with an email that matches their invitation. if user_invitation: if user_invitation.email.lower() == register_form.email.data.lower(): user_email.email_confirmed_at=datetime.utcnow() request_email_confirmation = False self.db_manager.save_user_and_user_email(user, user_email) self.db_manager.commit() # Send 'registered' email and delete new User object if send fails if self.USER_SEND_REGISTERED_EMAIL: try: # Send 'confirm email' or 'registered' email self._send_registered_email(user, user_email, request_email_confirmation) except Exception as e: # delete new User object if send fails self.db_manager.delete_object(user) self.db_manager.commit() raise # Send user_registered signal signals.user_registered.send(current_app._get_current_object(), user=user, user_invitation=user_invitation) # Redirect if USER_ENABLE_CONFIRM_EMAIL is set if self.USER_ENABLE_CONFIRM_EMAIL and request_email_confirmation: safe_reg_next_url = self.make_safe_url(register_form.reg_next.data) return redirect(safe_reg_next_url) # Auto-login after register or redirect to login page if 'reg_next' in request.args: safe_reg_next_url = self.make_safe_url(register_form.reg_next.data) else: safe_reg_next_url = self._endpoint_url(self.USER_AFTER_CONFIRM_ENDPOINT) if self.USER_AUTO_LOGIN_AFTER_REGISTER: return self._do_login_user(user, safe_reg_next_url) # auto-login else: return redirect(url_for('user.login') + '?next=' + quote(safe_reg_next_url)) # redirect to login page # Render form self.prepare_domain_translations() return render_template(self.USER_REGISTER_TEMPLATE, form=register_form, login_form=login_form, register_form=register_form)
def commit(ctx, commands, blank, check, sync, comment, confirm, at_time): """ Execute a commit against the device. Purpose: This function will send set commands to a device, and commit | the changes. Options exist for confirming, comments, | synchronizing, checking, blank commits, or delaying to a later | time/date. @param ctx: The click context paramter, for receiving the object dictionary | being manipulated by other previous functions. Needed by any | function with the @click.pass_context decorator. @type ctx: click.Context @param commands: String containing the set command to be sent to the | device. It can be a python list of strings, a single set | command, a comma separated string of commands, or a | string filepath pointing to a file with set commands | on each line. @type commands: str or list @param blank: A bool set to true to only make a blank commit. A blank | commit makes a commit, but doesn't have any set commands | associated with it, so no changes are made, but a commit | does happen. @type blank: bool @param check: A bool set to true to only run a commit check, and not | commit any changes. Useful for checking syntax of set | commands. @type check: bool @param sync: A bool set to true to sync the commit across both REs. @type sync: bool @param comment: A string that will be logged to the commit log | describing the commit. @type comment: str @param confirm: An integer of seconds to commit confirm for. @type confirm: int @param at_time: A string containing the time or time and date of when | the commit should happen. Junos is expecting one of two | formats: | A time value of the form hh:mm[:ss] (hours, minutes, | and optionally seconds) | A date and time value of the form yyyy-mm-dd hh:mm[:ss] | (year, month, date, hours, minutes, and optionally | seconds) @type at_time: str @returns: None. Functions part of click relating to the command group | 'main' do not return anything. 
Click handles passing context | between the functions and maintaing command order and chaining. """ if not blank and commands == 'annotate system ""': raise click.BadParameter("--blank and the commands argument cannot" " both be omitted.") mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2) for ip in ctx.obj['hosts']: mp_pool.apply_async(wrap.open_connection, args=(ip, ctx.obj['conn']['username'], ctx.obj['conn']['password'], wrap.commit, [commands, check, sync, comment, confirm, ctx.obj['at_time'], blank], ctx.obj['out'], ctx.obj['conn']['connect_timeout'], ctx.obj['conn']['session_timeout'], ctx.obj['conn']['port']), callback=write_out) mp_pool.close() mp_pool.join()
Execute a commit against the device. Purpose: This function will send set commands to a device, and commit | the changes. Options exist for confirming, comments, | synchronizing, checking, blank commits, or delaying to a later | time/date. @param ctx: The click context paramter, for receiving the object dictionary | being manipulated by other previous functions. Needed by any | function with the @click.pass_context decorator. @type ctx: click.Context @param commands: String containing the set command to be sent to the | device. It can be a python list of strings, a single set | command, a comma separated string of commands, or a | string filepath pointing to a file with set commands | on each line. @type commands: str or list @param blank: A bool set to true to only make a blank commit. A blank | commit makes a commit, but doesn't have any set commands | associated with it, so no changes are made, but a commit | does happen. @type blank: bool @param check: A bool set to true to only run a commit check, and not | commit any changes. Useful for checking syntax of set | commands. @type check: bool @param sync: A bool set to true to sync the commit across both REs. @type sync: bool @param comment: A string that will be logged to the commit log | describing the commit. @type comment: str @param confirm: An integer of seconds to commit confirm for. @type confirm: int @param at_time: A string containing the time or time and date of when | the commit should happen. Junos is expecting one of two | formats: | A time value of the form hh:mm[:ss] (hours, minutes, | and optionally seconds) | A date and time value of the form yyyy-mm-dd hh:mm[:ss] | (year, month, date, hours, minutes, and optionally | seconds) @type at_time: str @returns: None. Functions part of click relating to the command group | 'main' do not return anything. Click handles passing context | between the functions and maintaing command order and chaining.
Below is the the instruction that describes the task: ### Input: Execute a commit against the device. Purpose: This function will send set commands to a device, and commit | the changes. Options exist for confirming, comments, | synchronizing, checking, blank commits, or delaying to a later | time/date. @param ctx: The click context paramter, for receiving the object dictionary | being manipulated by other previous functions. Needed by any | function with the @click.pass_context decorator. @type ctx: click.Context @param commands: String containing the set command to be sent to the | device. It can be a python list of strings, a single set | command, a comma separated string of commands, or a | string filepath pointing to a file with set commands | on each line. @type commands: str or list @param blank: A bool set to true to only make a blank commit. A blank | commit makes a commit, but doesn't have any set commands | associated with it, so no changes are made, but a commit | does happen. @type blank: bool @param check: A bool set to true to only run a commit check, and not | commit any changes. Useful for checking syntax of set | commands. @type check: bool @param sync: A bool set to true to sync the commit across both REs. @type sync: bool @param comment: A string that will be logged to the commit log | describing the commit. @type comment: str @param confirm: An integer of seconds to commit confirm for. @type confirm: int @param at_time: A string containing the time or time and date of when | the commit should happen. Junos is expecting one of two | formats: | A time value of the form hh:mm[:ss] (hours, minutes, | and optionally seconds) | A date and time value of the form yyyy-mm-dd hh:mm[:ss] | (year, month, date, hours, minutes, and optionally | seconds) @type at_time: str @returns: None. Functions part of click relating to the command group | 'main' do not return anything. 
Click handles passing context | between the functions and maintaing command order and chaining. ### Response: def commit(ctx, commands, blank, check, sync, comment, confirm, at_time): """ Execute a commit against the device. Purpose: This function will send set commands to a device, and commit | the changes. Options exist for confirming, comments, | synchronizing, checking, blank commits, or delaying to a later | time/date. @param ctx: The click context paramter, for receiving the object dictionary | being manipulated by other previous functions. Needed by any | function with the @click.pass_context decorator. @type ctx: click.Context @param commands: String containing the set command to be sent to the | device. It can be a python list of strings, a single set | command, a comma separated string of commands, or a | string filepath pointing to a file with set commands | on each line. @type commands: str or list @param blank: A bool set to true to only make a blank commit. A blank | commit makes a commit, but doesn't have any set commands | associated with it, so no changes are made, but a commit | does happen. @type blank: bool @param check: A bool set to true to only run a commit check, and not | commit any changes. Useful for checking syntax of set | commands. @type check: bool @param sync: A bool set to true to sync the commit across both REs. @type sync: bool @param comment: A string that will be logged to the commit log | describing the commit. @type comment: str @param confirm: An integer of seconds to commit confirm for. @type confirm: int @param at_time: A string containing the time or time and date of when | the commit should happen. Junos is expecting one of two | formats: | A time value of the form hh:mm[:ss] (hours, minutes, | and optionally seconds) | A date and time value of the form yyyy-mm-dd hh:mm[:ss] | (year, month, date, hours, minutes, and optionally | seconds) @type at_time: str @returns: None. 
Functions part of click relating to the command group | 'main' do not return anything. Click handles passing context | between the functions and maintaing command order and chaining. """ if not blank and commands == 'annotate system ""': raise click.BadParameter("--blank and the commands argument cannot" " both be omitted.") mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2) for ip in ctx.obj['hosts']: mp_pool.apply_async(wrap.open_connection, args=(ip, ctx.obj['conn']['username'], ctx.obj['conn']['password'], wrap.commit, [commands, check, sync, comment, confirm, ctx.obj['at_time'], blank], ctx.obj['out'], ctx.obj['conn']['connect_timeout'], ctx.obj['conn']['session_timeout'], ctx.obj['conn']['port']), callback=write_out) mp_pool.close() mp_pool.join()
def translate_to_english_phonetic_alphabet(self, hide_stress_mark=False): ''' 转换成英音。只要一个元音的时候需要隐藏重音标识 :param hide_stress_mark: :return: ''' translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else "" for phoneme in self._phoneme_list: translations += phoneme.english return translations
转换成英音。只要一个元音的时候需要隐藏重音标识 :param hide_stress_mark: :return:
Below is the the instruction that describes the task: ### Input: 转换成英音。只要一个元音的时候需要隐藏重音标识 :param hide_stress_mark: :return: ### Response: def translate_to_english_phonetic_alphabet(self, hide_stress_mark=False): ''' 转换成英音。只要一个元音的时候需要隐藏重音标识 :param hide_stress_mark: :return: ''' translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else "" for phoneme in self._phoneme_list: translations += phoneme.english return translations
def get_dict(cls): """ Return dictionary with conspect / subconspect info. """ mdt = cls.get() if not mdt: return {} return conspectus.subs_by_mdt.get(mdt, {})
Return dictionary with conspect / subconspect info.
Below is the the instruction that describes the task: ### Input: Return dictionary with conspect / subconspect info. ### Response: def get_dict(cls): """ Return dictionary with conspect / subconspect info. """ mdt = cls.get() if not mdt: return {} return conspectus.subs_by_mdt.get(mdt, {})
def findLibrary(name): """ Look for a library in the system. Emulate the algorithm used by dlopen. `name`must include the prefix, e.g. ``libpython2.4.so`` """ assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)" lib = None # Look in the LD_LIBRARY_PATH lp = compat.getenv('LD_LIBRARY_PATH', '') for path in lp.split(os.pathsep): libs = glob(os.path.join(path, name + '*')) if libs: lib = libs[0] break # Look in /etc/ld.so.cache if lib is None: expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name) m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p')) if m: lib = m.group(0) # Look in the known safe paths if lib is None: paths = ['/lib', '/usr/lib'] if is_aix: paths.append('/opt/freeware/lib') for path in paths: libs = glob(os.path.join(path, name + '*')) if libs: lib = libs[0] break # give up :( if lib is None: return None # Resolve the file name into the soname dir, file = os.path.split(lib) return os.path.join(dir, getSoname(lib))
Look for a library in the system. Emulate the algorithm used by dlopen. `name`must include the prefix, e.g. ``libpython2.4.so``
Below is the the instruction that describes the task: ### Input: Look for a library in the system. Emulate the algorithm used by dlopen. `name`must include the prefix, e.g. ``libpython2.4.so`` ### Response: def findLibrary(name): """ Look for a library in the system. Emulate the algorithm used by dlopen. `name`must include the prefix, e.g. ``libpython2.4.so`` """ assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)" lib = None # Look in the LD_LIBRARY_PATH lp = compat.getenv('LD_LIBRARY_PATH', '') for path in lp.split(os.pathsep): libs = glob(os.path.join(path, name + '*')) if libs: lib = libs[0] break # Look in /etc/ld.so.cache if lib is None: expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name) m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p')) if m: lib = m.group(0) # Look in the known safe paths if lib is None: paths = ['/lib', '/usr/lib'] if is_aix: paths.append('/opt/freeware/lib') for path in paths: libs = glob(os.path.join(path, name + '*')) if libs: lib = libs[0] break # give up :( if lib is None: return None # Resolve the file name into the soname dir, file = os.path.split(lib) return os.path.join(dir, getSoname(lib))
def search_alert_deleted_for_facet(self, facet, **kwargs): # noqa: E501 """Lists the values of a specific facet over the customer's deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_deleted_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501 else: (data) = self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501 return data
Lists the values of a specific facet over the customer's deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_deleted_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Lists the values of a specific facet over the customer's deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_deleted_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. ### Response: def search_alert_deleted_for_facet(self, facet, **kwargs): # noqa: E501 """Lists the values of a specific facet over the customer's deleted alerts # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.search_alert_deleted_for_facet(facet, async_req=True) >>> result = thread.get() :param async_req bool :param str facet: (required) :param FacetSearchRequestContainer body: :return: ResponseContainerFacetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501 else: (data) = self.search_alert_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501 return data
def _update_limits_from_api(self): """ Query EC2's DescribeAccountAttributes API action and update the network interface limit, as needed. Updates ``self.limits``. More info on the network interface limit, from the docs: 'This limit is the greater of either the default limit (350) or your On-Demand Instance limit multiplied by 5. The default limit for On-Demand Instances is 20.' """ self.connect() self.connect_resource() logger.info("Querying EC2 DescribeAccountAttributes for limits") attribs = self.conn.describe_account_attributes() for attrib in attribs['AccountAttributes']: if attrib['AttributeName'] == 'max-instances': val = attrib['AttributeValues'][0]['AttributeValue'] if int(val) * 5 > DEFAULT_ENI_LIMIT: limit_name = 'Network interfaces per Region' self.limits[limit_name]._set_api_limit(int(val) * 5) logger.debug("Done setting limits from API")
Query EC2's DescribeAccountAttributes API action and update the network interface limit, as needed. Updates ``self.limits``. More info on the network interface limit, from the docs: 'This limit is the greater of either the default limit (350) or your On-Demand Instance limit multiplied by 5. The default limit for On-Demand Instances is 20.'
Below is the the instruction that describes the task: ### Input: Query EC2's DescribeAccountAttributes API action and update the network interface limit, as needed. Updates ``self.limits``. More info on the network interface limit, from the docs: 'This limit is the greater of either the default limit (350) or your On-Demand Instance limit multiplied by 5. The default limit for On-Demand Instances is 20.' ### Response: def _update_limits_from_api(self): """ Query EC2's DescribeAccountAttributes API action and update the network interface limit, as needed. Updates ``self.limits``. More info on the network interface limit, from the docs: 'This limit is the greater of either the default limit (350) or your On-Demand Instance limit multiplied by 5. The default limit for On-Demand Instances is 20.' """ self.connect() self.connect_resource() logger.info("Querying EC2 DescribeAccountAttributes for limits") attribs = self.conn.describe_account_attributes() for attrib in attribs['AccountAttributes']: if attrib['AttributeName'] == 'max-instances': val = attrib['AttributeValues'][0]['AttributeValue'] if int(val) * 5 > DEFAULT_ENI_LIMIT: limit_name = 'Network interfaces per Region' self.limits[limit_name]._set_api_limit(int(val) * 5) logger.debug("Done setting limits from API")
def send_messages(self, messages): """ Send messages. :param list messages: List of SmsMessage instances. :returns: number of messages sended successful. :rtype: int """ counter = 0 for message in messages: res, _ = self._send(message) if res: counter += 1 return counter
Send messages. :param list messages: List of SmsMessage instances. :returns: number of messages sended successful. :rtype: int
Below is the the instruction that describes the task: ### Input: Send messages. :param list messages: List of SmsMessage instances. :returns: number of messages sended successful. :rtype: int ### Response: def send_messages(self, messages): """ Send messages. :param list messages: List of SmsMessage instances. :returns: number of messages sended successful. :rtype: int """ counter = 0 for message in messages: res, _ = self._send(message) if res: counter += 1 return counter
def get_calls_from_dict(self, file_dict, from_name, settings={}): ''' Processes unfolded yaml object to CallEdge array settings is a dict of settings for keeping information like in what section we are right now (e.g. builders, publishers) ''' calls = [] call_settings = dict(settings) # Include all possible sections # The way to draw them is defined in call graph special_sections = {'builders', 'publishers', 'wrappers'} # Trigger flags triggers = {'trigger-builds', 'trigger-parameterized-builds'} if isinstance(file_dict, dict): for key in file_dict: if key in special_sections: call_settings['section'] = key if key in triggers: calls.extend(self.extract_call(file_dict[key], from_name, settings=call_settings)) else: calls.extend(self.get_calls_from_dict(file_dict[key], from_name, settings=call_settings)) elif type(file_dict) == list: for value in file_dict: calls.extend(self.get_calls_from_dict(value, from_name, settings=call_settings)) return calls
Processes unfolded yaml object to CallEdge array settings is a dict of settings for keeping information like in what section we are right now (e.g. builders, publishers)
Below is the the instruction that describes the task: ### Input: Processes unfolded yaml object to CallEdge array settings is a dict of settings for keeping information like in what section we are right now (e.g. builders, publishers) ### Response: def get_calls_from_dict(self, file_dict, from_name, settings={}): ''' Processes unfolded yaml object to CallEdge array settings is a dict of settings for keeping information like in what section we are right now (e.g. builders, publishers) ''' calls = [] call_settings = dict(settings) # Include all possible sections # The way to draw them is defined in call graph special_sections = {'builders', 'publishers', 'wrappers'} # Trigger flags triggers = {'trigger-builds', 'trigger-parameterized-builds'} if isinstance(file_dict, dict): for key in file_dict: if key in special_sections: call_settings['section'] = key if key in triggers: calls.extend(self.extract_call(file_dict[key], from_name, settings=call_settings)) else: calls.extend(self.get_calls_from_dict(file_dict[key], from_name, settings=call_settings)) elif type(file_dict) == list: for value in file_dict: calls.extend(self.get_calls_from_dict(value, from_name, settings=call_settings)) return calls
def comment(self, comment): """ Add a :class:`Comment <pypump.models.comment.Comment>` to the object. :param comment: A :class:`Comment <pypump.models.comment.Comment>` instance, text content is also accepted. Example: >>> anote.comment(pump.Comment('I agree!')) """ if isinstance(comment, six.string_types): comment = self._pump.Comment(comment) comment.in_reply_to = self comment.send()
Add a :class:`Comment <pypump.models.comment.Comment>` to the object. :param comment: A :class:`Comment <pypump.models.comment.Comment>` instance, text content is also accepted. Example: >>> anote.comment(pump.Comment('I agree!'))
Below is the the instruction that describes the task: ### Input: Add a :class:`Comment <pypump.models.comment.Comment>` to the object. :param comment: A :class:`Comment <pypump.models.comment.Comment>` instance, text content is also accepted. Example: >>> anote.comment(pump.Comment('I agree!')) ### Response: def comment(self, comment): """ Add a :class:`Comment <pypump.models.comment.Comment>` to the object. :param comment: A :class:`Comment <pypump.models.comment.Comment>` instance, text content is also accepted. Example: >>> anote.comment(pump.Comment('I agree!')) """ if isinstance(comment, six.string_types): comment = self._pump.Comment(comment) comment.in_reply_to = self comment.send()
def operation_file(uploader, cmd, filename=''): """File operations""" if cmd == 'list': operation_list(uploader) if cmd == 'do': for path in filename: uploader.file_do(path) elif cmd == 'format': uploader.file_format() elif cmd == 'remove': for path in filename: uploader.file_remove(path) elif cmd == 'print': for path in filename: uploader.file_print(path)
File operations
Below is the the instruction that describes the task: ### Input: File operations ### Response: def operation_file(uploader, cmd, filename=''): """File operations""" if cmd == 'list': operation_list(uploader) if cmd == 'do': for path in filename: uploader.file_do(path) elif cmd == 'format': uploader.file_format() elif cmd == 'remove': for path in filename: uploader.file_remove(path) elif cmd == 'print': for path in filename: uploader.file_print(path)
def loadScopeGroupbyName(self, name, service_group_id, callback=None, errback=None): """ Load an existing Scope Group by name and service group id into a high level Scope Group object :param str name: Name of an existing Scope Group :param int service_group_id: id of the service group the Scope group is associated with """ import ns1.ipam scope_group = ns1.ipam.Scopegroup(self.config, name=name, service_group_id=service_group_id) return scope_group.load(callback=callback, errback=errback)
Load an existing Scope Group by name and service group id into a high level Scope Group object :param str name: Name of an existing Scope Group :param int service_group_id: id of the service group the Scope group is associated with
Below is the the instruction that describes the task: ### Input: Load an existing Scope Group by name and service group id into a high level Scope Group object :param str name: Name of an existing Scope Group :param int service_group_id: id of the service group the Scope group is associated with ### Response: def loadScopeGroupbyName(self, name, service_group_id, callback=None, errback=None): """ Load an existing Scope Group by name and service group id into a high level Scope Group object :param str name: Name of an existing Scope Group :param int service_group_id: id of the service group the Scope group is associated with """ import ns1.ipam scope_group = ns1.ipam.Scopegroup(self.config, name=name, service_group_id=service_group_id) return scope_group.load(callback=callback, errback=errback)
def week(self): """ Returns an ``int`` of the week number in the season, such as 1 for the first week of the regular season. """ if self._week.lower() == 'wild card': return WILD_CARD if self._week.lower() == 'division': return DIVISION if self._week.lower() == 'conf. champ.': return CONF_CHAMPIONSHIP if self._week.lower() == 'superbowl': return SUPER_BOWL return self._week
Returns an ``int`` of the week number in the season, such as 1 for the first week of the regular season.
Below is the the instruction that describes the task: ### Input: Returns an ``int`` of the week number in the season, such as 1 for the first week of the regular season. ### Response: def week(self): """ Returns an ``int`` of the week number in the season, such as 1 for the first week of the regular season. """ if self._week.lower() == 'wild card': return WILD_CARD if self._week.lower() == 'division': return DIVISION if self._week.lower() == 'conf. champ.': return CONF_CHAMPIONSHIP if self._week.lower() == 'superbowl': return SUPER_BOWL return self._week
def _wrap(text, columns=80): """ Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` characters. Returns: str: Wrapped text. """ out = [] for cnt, char in enumerate(text): out.append(char) if (cnt + 1) % columns == 0: out.append("\n") return "".join(out)
Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` characters. Returns: str: Wrapped text.
Below is the the instruction that describes the task: ### Input: Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` characters. Returns: str: Wrapped text. ### Response: def _wrap(text, columns=80): """ Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` characters. Returns: str: Wrapped text. """ out = [] for cnt, char in enumerate(text): out.append(char) if (cnt + 1) % columns == 0: out.append("\n") return "".join(out)
def SetCACertificatesPath(self, ca_certificates_path): """Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist. """ if not ca_certificates_path: return if not os.path.exists(ca_certificates_path): raise errors.BadConfigOption( 'No such certificate file: {0:s}.'.format(ca_certificates_path)) self._ca_certs = ca_certificates_path logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))
Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist.
Below is the the instruction that describes the task: ### Input: Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist. ### Response: def SetCACertificatesPath(self, ca_certificates_path): """Sets the path to the CA certificates. Args: ca_certificates_path (str): path to file containing a list of root certificates to trust. Raises: BadConfigOption: if the CA certificates file does not exist. """ if not ca_certificates_path: return if not os.path.exists(ca_certificates_path): raise errors.BadConfigOption( 'No such certificate file: {0:s}.'.format(ca_certificates_path)) self._ca_certs = ca_certificates_path logger.debug('Elasticsearch ca_certs: {0!s}'.format(ca_certificates_path))
def check_cache(path): """Return true if the cache file holding list of all datasets does not exist or is older than 30 days """ if not os.path.exists(path): return True else: # check the age mod_date = datetime.fromtimestamp(os.path.getmtime(path)) if mod_date < (datetime.now() - timedelta(days=30)): return True else: return False
Return true if the cache file holding list of all datasets does not exist or is older than 30 days
Below is the the instruction that describes the task: ### Input: Return true if the cache file holding list of all datasets does not exist or is older than 30 days ### Response: def check_cache(path): """Return true if the cache file holding list of all datasets does not exist or is older than 30 days """ if not os.path.exists(path): return True else: # check the age mod_date = datetime.fromtimestamp(os.path.getmtime(path)) if mod_date < (datetime.now() - timedelta(days=30)): return True else: return False
def handleCardDblClick( self, item ): """ Handles when a card item is double clicked on. :param item | <QTreeWidgetItem> """ widget = self.uiCardTREE.itemWidget(item, 0) if ( isinstance(widget, XAbstractCardWidget) ): self.emitRecordDoubleClicked(widget.record())
Handles when a card item is double clicked on. :param item | <QTreeWidgetItem>
Below is the the instruction that describes the task: ### Input: Handles when a card item is double clicked on. :param item | <QTreeWidgetItem> ### Response: def handleCardDblClick( self, item ): """ Handles when a card item is double clicked on. :param item | <QTreeWidgetItem> """ widget = self.uiCardTREE.itemWidget(item, 0) if ( isinstance(widget, XAbstractCardWidget) ): self.emitRecordDoubleClicked(widget.record())
def init_logging(): """Initialise Python logging.""" fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \ '| %(message)s' logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
Initialise Python logging.
Below is the the instruction that describes the task: ### Input: Initialise Python logging. ### Response: def init_logging(): """Initialise Python logging.""" fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \ '| %(message)s' logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
def gaus_pdf(x, mean, std): '''Gaussian distribution's probability density function. See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_. :param x: point in x-axis :type x: float or numpy.ndarray :param float mean: mean or expectation :param float str: standard deviation :returns: pdf(s) in point **x** :rtype: float or numpy.ndarray ''' return exp(-((x - mean) / std)**2 / 2) / sqrt(2 * pi) / std
Gaussian distribution's probability density function. See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_. :param x: point in x-axis :type x: float or numpy.ndarray :param float mean: mean or expectation :param float str: standard deviation :returns: pdf(s) in point **x** :rtype: float or numpy.ndarray
Below is the the instruction that describes the task: ### Input: Gaussian distribution's probability density function. See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_. :param x: point in x-axis :type x: float or numpy.ndarray :param float mean: mean or expectation :param float str: standard deviation :returns: pdf(s) in point **x** :rtype: float or numpy.ndarray ### Response: def gaus_pdf(x, mean, std): '''Gaussian distribution's probability density function. See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_. :param x: point in x-axis :type x: float or numpy.ndarray :param float mean: mean or expectation :param float str: standard deviation :returns: pdf(s) in point **x** :rtype: float or numpy.ndarray ''' return exp(-((x - mean) / std)**2 / 2) / sqrt(2 * pi) / std
def _handle_id(self, node, scope, ctxt, stream): """Handle an ID node (return a field object for the ID) :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ if node.name == "__root": return self._root if node.name == "__this" or node.name == "this": return ctxt self._dlog("handling id {}".format(node.name)) field = scope.get_id(node.name) is_lazy = getattr(node, "is_lazy", False) if field is None and not is_lazy: raise errors.UnresolvedID(node.coord, node.name) elif is_lazy: return LazyField(node.name, scope) return field
Handle an ID node (return a field object for the ID) :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
Below is the the instruction that describes the task: ### Input: Handle an ID node (return a field object for the ID) :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO ### Response: def _handle_id(self, node, scope, ctxt, stream): """Handle an ID node (return a field object for the ID) :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ if node.name == "__root": return self._root if node.name == "__this" or node.name == "this": return ctxt self._dlog("handling id {}".format(node.name)) field = scope.get_id(node.name) is_lazy = getattr(node, "is_lazy", False) if field is None and not is_lazy: raise errors.UnresolvedID(node.coord, node.name) elif is_lazy: return LazyField(node.name, scope) return field
def time_from_match(match_object): """Create a time object from a regular expression match. The regular expression match is expected to be from RE_TIME or RE_DATETIME. @param match_object: The regular expression match. @type value: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{time} """ hour = int(match_object.group('hour')) minute = int(match_object.group('minute')) second = int(match_object.group('second')) subsecond = match_object.group('subsecond') microsecond = 0 if subsecond is not None: subsecond_denominator = 10.0 ** len(subsecond) subsecond = int(subsecond) microsecond = subsecond * (1000000 / subsecond_denominator) microsecond = int(round(microsecond)) return datetime.time(hour, minute, second, microsecond)
Create a time object from a regular expression match. The regular expression match is expected to be from RE_TIME or RE_DATETIME. @param match_object: The regular expression match. @type value: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{time}
Below is the the instruction that describes the task: ### Input: Create a time object from a regular expression match. The regular expression match is expected to be from RE_TIME or RE_DATETIME. @param match_object: The regular expression match. @type value: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{time} ### Response: def time_from_match(match_object): """Create a time object from a regular expression match. The regular expression match is expected to be from RE_TIME or RE_DATETIME. @param match_object: The regular expression match. @type value: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{time} """ hour = int(match_object.group('hour')) minute = int(match_object.group('minute')) second = int(match_object.group('second')) subsecond = match_object.group('subsecond') microsecond = 0 if subsecond is not None: subsecond_denominator = 10.0 ** len(subsecond) subsecond = int(subsecond) microsecond = subsecond * (1000000 / subsecond_denominator) microsecond = int(round(microsecond)) return datetime.time(hour, minute, second, microsecond)
def write(self, valuedict, version): """Generates the lines for the converted input file from the valuedict. :arg valuedict: a dictionary of values where the keys are ids in the template and the values obey their template rules. :arg version: the target version of the output file. """ result = [] if version in self.versions: for tag in self.versions[version].order: entry = self.versions[version].entries[tag] result.extend(entry.write(valuedict)) return result
Generates the lines for the converted input file from the valuedict. :arg valuedict: a dictionary of values where the keys are ids in the template and the values obey their template rules. :arg version: the target version of the output file.
Below is the the instruction that describes the task: ### Input: Generates the lines for the converted input file from the valuedict. :arg valuedict: a dictionary of values where the keys are ids in the template and the values obey their template rules. :arg version: the target version of the output file. ### Response: def write(self, valuedict, version): """Generates the lines for the converted input file from the valuedict. :arg valuedict: a dictionary of values where the keys are ids in the template and the values obey their template rules. :arg version: the target version of the output file. """ result = [] if version in self.versions: for tag in self.versions[version].order: entry = self.versions[version].entries[tag] result.extend(entry.write(valuedict)) return result
def update(self): """ Fetches the updated ring from Redis and updates the current ranges. """ ring = self._fetch() n_replicas = len(ring) replica_set = set([r[1] for r in self.replicas]) self.ranges = [] for n, (start, replica) in enumerate(ring): if replica in replica_set: end = ring[(n+1) % n_replicas][0] % RING_SIZE if start < end: self.ranges.append((start, end)) elif end < start: self.ranges.append((start, RING_SIZE)) self.ranges.append((0, end)) else: self.ranges.append((0, RING_SIZE))
Fetches the updated ring from Redis and updates the current ranges.
Below is the the instruction that describes the task: ### Input: Fetches the updated ring from Redis and updates the current ranges. ### Response: def update(self): """ Fetches the updated ring from Redis and updates the current ranges. """ ring = self._fetch() n_replicas = len(ring) replica_set = set([r[1] for r in self.replicas]) self.ranges = [] for n, (start, replica) in enumerate(ring): if replica in replica_set: end = ring[(n+1) % n_replicas][0] % RING_SIZE if start < end: self.ranges.append((start, end)) elif end < start: self.ranges.append((start, RING_SIZE)) self.ranges.append((0, end)) else: self.ranges.append((0, RING_SIZE))
def _post_run_hook(self, runtime): ''' generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid ''' self._anat_file = self.inputs.in_file self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file self._seg_files = [self._mask_file] self._masked = self.inputs.mask NIWORKFLOWS_LOG.info('Generating report for BET. file "%s", and mask file "%s"', self._anat_file, self._mask_file) return super(BETRPT, self)._post_run_hook(runtime)
generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid
Below is the the instruction that describes the task: ### Input: generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid ### Response: def _post_run_hook(self, runtime): ''' generates a report showing slices from each axis of an arbitrary volume of in_file, with the resulting binary brain mask overlaid ''' self._anat_file = self.inputs.in_file self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file self._seg_files = [self._mask_file] self._masked = self.inputs.mask NIWORKFLOWS_LOG.info('Generating report for BET. file "%s", and mask file "%s"', self._anat_file, self._mask_file) return super(BETRPT, self)._post_run_hook(runtime)
def fullpath(relpath): '''Relative path to absolute''' if (type(relpath) is object or type(relpath) is file): relpath = relpath.name return os.path.abspath(os.path.expanduser(relpath))
Relative path to absolute
Below is the the instruction that describes the task: ### Input: Relative path to absolute ### Response: def fullpath(relpath): '''Relative path to absolute''' if (type(relpath) is object or type(relpath) is file): relpath = relpath.name return os.path.abspath(os.path.expanduser(relpath))
def long_to_hex(l, size): """Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte".""" f_str = "{0:0%sx}" % size return ensure_bytes(f_str.format(l).lower())
Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte".
Below is the the instruction that describes the task: ### Input: Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte". ### Response: def long_to_hex(l, size): """Encode a long value as a hex string, 0-padding to size. Note that size is the size of the resulting hex string. So, for a 32Byte long size should be 64 (two hex characters per byte".""" f_str = "{0:0%sx}" % size return ensure_bytes(f_str.format(l).lower())
def _push_property_schema(self, prop): """Construct a sub-schema from a property of the current schema.""" schema = Schema(self._schema.properties[prop]) self._push_schema(schema, ".properties." + prop)
Construct a sub-schema from a property of the current schema.
Below is the the instruction that describes the task: ### Input: Construct a sub-schema from a property of the current schema. ### Response: def _push_property_schema(self, prop): """Construct a sub-schema from a property of the current schema.""" schema = Schema(self._schema.properties[prop]) self._push_schema(schema, ".properties." + prop)
def threshold_monitor_hidden_threshold_monitor_sfp_apply(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") sfp = ET.SubElement(threshold_monitor, "sfp") apply = ET.SubElement(sfp, "apply") apply.text = kwargs.pop('apply') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def threshold_monitor_hidden_threshold_monitor_sfp_apply(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") sfp = ET.SubElement(threshold_monitor, "sfp") apply = ET.SubElement(sfp, "apply") apply.text = kwargs.pop('apply') callback = kwargs.pop('callback', self._callback) return callback(config)
def publishing_prepare_published_copy(self, draft_obj): """ Prepare published copy of draft prior to saving it """ # We call super here, somewhat perversely, to ensure this method will # be called on publishable subclasses if implemented there. mysuper = super(PublishingModel, self) if hasattr(mysuper, 'publishing_prepare_published_copy'): mysuper.publishing_prepare_published_copy(draft_obj)
Prepare published copy of draft prior to saving it
Below is the the instruction that describes the task: ### Input: Prepare published copy of draft prior to saving it ### Response: def publishing_prepare_published_copy(self, draft_obj): """ Prepare published copy of draft prior to saving it """ # We call super here, somewhat perversely, to ensure this method will # be called on publishable subclasses if implemented there. mysuper = super(PublishingModel, self) if hasattr(mysuper, 'publishing_prepare_published_copy'): mysuper.publishing_prepare_published_copy(draft_obj)
def set_field_value(self, name, value):
    """ Set the value to the field modified_data """
    field = self.get_real_name(name)
    if not field or not self._can_write_field(field):
        return

    # Writing a field resurrects it if it was previously deleted.
    if field in self.__deleted_fields__:
        self.__deleted_fields__.remove(field)

    if self.__original_data__.get(field) == value:
        # Value matches the original: discard any pending modification.
        self.__modified_data__.pop(field, None)
        return

    self.__modified_data__[field] = value
    self._prepare_child(value)

    # Propagate read-only state onto the newly assigned child, when the
    # field definition asks for it and the value supports it.
    if field in self.__structure__ and self.__structure__[field].read_only:
        try:
            value.set_read_only(True)
        except AttributeError:
            pass
Set the value to the field modified_data
Below is the the instruction that describes the task: ### Input: Set the value to the field modified_data ### Response: def set_field_value(self, name, value): """ Set the value to the field modified_data """ name = self.get_real_name(name) if not name or not self._can_write_field(name): return if name in self.__deleted_fields__: self.__deleted_fields__.remove(name) if self.__original_data__.get(name) == value: try: self.__modified_data__.pop(name) except KeyError: pass else: self.__modified_data__[name] = value self._prepare_child(value) if name not in self.__structure__ or not self.__structure__[name].read_only: return try: value.set_read_only(True) except AttributeError: pass
def zero_state(qubits: Union[int, Qubits]) -> State:
    """Return the all-zero state on N qubits"""
    N, qubits = qubits_count_tuple(qubits)
    tensor = np.zeros(shape=[2] * N)
    # First element in C order is the |00...0> basis state; give it
    # amplitude one so the tensor is a normalized all-zero ket.
    tensor.flat[0] = 1
    return State(tensor, qubits)
Return the all-zero state on N qubits
Below is the instruction that describes the task:
### Input:
Return the all-zero state on N qubits

### Response:
def zero_state(qubits: Union[int, Qubits]) -> State:
    """Return the all-zero state on N qubits"""
    N, qubits = qubits_count_tuple(qubits)
    ket = np.zeros(shape=[2] * N)
    ket[(0,) * N] = 1
    return State(ket, qubits)
def construct_codons_dict(alphabet_file = None):
    """Generate the sub_codons_right dictionary of codon suffixes.

    syntax of custom alphabet_files:
    char: list,of,amino,acids,or,codons,separated,by,commas

    Parameters
    ----------
    alphabet_file : str
        File name for a custom alphabet definition. If no file is provided,
        the default alphabet is used, i.e. standard amino acids, undetermined
        amino acids (B, J, X, and Z), and single codon symbols.

    Returns
    -------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.

    """
    #Some symbols can't be used in the CDR3 sequences in order to allow for
    #regular expression parsing and general manipulation.
    protected_symbols = [' ', '\t', '\n', '\x0b', '\x0c', '\r', ':', ',', ';', '[', ']', '{', '}', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    #construct list of all 64 codons
    codons = [i + j + k for i in 'ACGT' for j in 'ACGT' for k in 'ACGT']

    codons_dict = {}

    #add standard amino acids symbols to the dict (i.e. 'ACDEFGHIKLMNPQRSTVWY*').
    #these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict[nt2aa(codon)] = codons_dict.get(nt2aa(codon), []) + [codon]

    #add single codon symbols to allow for inframe ntseq pgen computation
    #(one symbol per codon, produced by nt2codon_rep):
    #'\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
    #these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict[nt2codon_rep(codon)] = [codon]

    #Check to see if custom alphabet file is supplied, else use default alphabet
    #Include standard ambigious amino acids.
    #these symbols CAN be overwritten by custom alphabet files
    expanded_alphabet = {}
    expanded_alphabet['B'] = ['D','N']
    expanded_alphabet['J'] = ['I', 'L']
    expanded_alphabet['X'] = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']
    expanded_alphabet['Z'] = ['E', 'Q']

    if alphabet_file is not None: #Use custom alphabet file definitions
        alphabet_f = open(alphabet_file, 'r')
        for line in alphabet_f:
            #assumed syntax is of a line is:
            #s: a1, a2, a3, a4, a5, ..., aN
            #where s is a single character symbol that isn't reserved, and all
            #of the a's are either amino acid symbols or codons. Whitespaces
            #will be stripped as will brackets if the a's are presented as a
            #list.
            c_symbol = line.split(':', 1)[0].strip(''.join(protected_symbols)) #Note there shouldn't be any additional colons -- this is a protected symbol.
            c_aa_codon_list_str = line.split(':', 1)[1]
            expanded_alphabet[c_symbol] = [x.strip(''.join(protected_symbols)) for x in c_aa_codon_list_str.split(',')]
        alphabet_f.close()

    for symbol in expanded_alphabet.keys():
        #Double check that the symbol isn't already used (important particularly for the single codon representation)
        if symbol in codons_dict.keys():
            print symbol + " is already used as an 'amino acid' symbol for codons: "
            print codons_dict[symbol]
            continue
        elif not len(symbol) == 1: #Check that the custom symbol is a single character
            print "Can't use " + symbol + " as a custom 'amino acid' definitions as such symbols must be single characters."
            continue
        elif symbol in protected_symbols: #This elif shouldn't trigger due to the stripping of protected symbols.
            #NOTE(review): this branch only warns and does NOT `continue`, so a
            #protected symbol would still be processed below -- presumably
            #acceptable because protected characters are stripped when the
            #alphabet file is parsed; confirm that is intended.
            print symbol + " is a protected character"

        current_codon_collection = set()
        for x in expanded_alphabet[symbol]:
            if x in codons_dict.keys(): #Check if reference to an amino acid or other amino acid symbol
                current_codon_collection = current_codon_collection.union(codons_dict[x]) #If so, add those codons to the new collection
            elif x.upper() in codons: #Check if specifying a single codon
                current_codon_collection.add(x.upper()) #If so, add the codon to the new collection
            elif len(x) == 0: #fully stripped away
                continue
            else: #If not, don't recognize the addition and continue.
                print 'Unfamiliar amino acid symbol or codon: ' + x
                continue
        codons_dict[symbol] = list(current_codon_collection)

    return codons_dict
Generate the sub_codons_right dictionary of codon suffixes. syntax of custom alphabet_files: char: list,of,amino,acids,or,codons,separated,by,commas Parameters ---------- alphabet_file : str File name for a custom alphabet definition. If no file is provided, the default alphabet is used, i.e. standard amino acids, undetermined amino acids (B, J, X, and Z), and single codon symbols. Returns ------- codons_dict : dict Dictionary, keyed by the allowed 'amino acid' symbols with the values being lists of codons corresponding to the symbol.
Below is the the instruction that describes the task: ### Input: Generate the sub_codons_right dictionary of codon suffixes. syntax of custom alphabet_files: char: list,of,amino,acids,or,codons,separated,by,commas Parameters ---------- alphabet_file : str File name for a custom alphabet definition. If no file is provided, the default alphabet is used, i.e. standard amino acids, undetermined amino acids (B, J, X, and Z), and single codon symbols. Returns ------- codons_dict : dict Dictionary, keyed by the allowed 'amino acid' symbols with the values being lists of codons corresponding to the symbol. ### Response: def construct_codons_dict(alphabet_file = None): """Generate the sub_codons_right dictionary of codon suffixes. syntax of custom alphabet_files: char: list,of,amino,acids,or,codons,separated,by,commas Parameters ---------- alphabet_file : str File name for a custom alphabet definition. If no file is provided, the default alphabet is used, i.e. standard amino acids, undetermined amino acids (B, J, X, and Z), and single codon symbols. Returns ------- codons_dict : dict Dictionary, keyed by the allowed 'amino acid' symbols with the values being lists of codons corresponding to the symbol. """ #Some symbols can't be used in the CDR3 sequences in order to allow for #regular expression parsing and general manipulation. protected_symbols = [' ', '\t', '\n', '\x0b', '\x0c', '\r', ':', ',', ';', '[', ']', '{', '}', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] #construct list of all 64 codons codons = [i + j + k for i in 'ACGT' for j in 'ACGT' for k in 'ACGT'] codons_dict = {} #add standard amino acids symbols to the dict (i.e. 'ACDEFGHIKLMNPQRSTVWY*'). 
#these symbols CANNOT be overwritten by custom alphabet files for codon in codons: codons_dict[nt2aa(codon)] = codons_dict.get(nt2aa(codon), []) + [codon] #add single codon symbols to allow for inframe ntseq pgen computation #'\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf' #these symbols CANNOT be overwritten by custom alphabet files for codon in codons: codons_dict[nt2codon_rep(codon)] = [codon] #Check to see if custom alphabet file is supplied, else use default alphabet #Include standard ambigious amino acids. #these symbols CAN be overwritten by custom alphabet files expanded_alphabet = {} expanded_alphabet['B'] = ['D','N'] expanded_alphabet['J'] = ['I', 'L'] expanded_alphabet['X'] = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y'] expanded_alphabet['Z'] = ['E', 'Q'] if alphabet_file is not None: #Use custom alphabet file definitions alphabet_f = open(alphabet_file, 'r') for line in alphabet_f: #assumed syntax is of a line is: #s: a1, a2, a3, a4, a5, ..., aN #where s is a single character symbol that isn't reserved, and all #of the a's are either amino acid symbols or codons. Whitespaces #will be stripped as will brackets if the a's are presented as a #list. c_symbol = line.split(':', 1)[0].strip(''.join(protected_symbols)) #Note there shouldn't be any additional colons -- this is a protected symbol. 
c_aa_codon_list_str = line.split(':', 1)[1] expanded_alphabet[c_symbol] = [x.strip(''.join(protected_symbols)) for x in c_aa_codon_list_str.split(',')] alphabet_f.close() for symbol in expanded_alphabet.keys(): #Double check that the symbol isn't already used (important particularly for the single codon representation) if symbol in codons_dict.keys(): print symbol + " is already used as an 'amino acid' symbol for codons: " print codons_dict[symbol] continue elif not len(symbol) == 1: #Check that the custom symbol is a single character print "Can't use " + symbol + " as a custom 'amino acid' definitions as such symbols must be single characters." continue elif symbol in protected_symbols: #This elif shouldn't trigger due to the stripping of protected symbols. print symbol + " is a protected character" current_codon_collection = set() for x in expanded_alphabet[symbol]: if x in codons_dict.keys(): #Check if reference to an amino acid or other amino acid symbol current_codon_collection = current_codon_collection.union(codons_dict[x]) #If so, add those codons to the new collection elif x.upper() in codons: #Check if specifying a single codon current_codon_collection.add(x.upper()) #If so, add the codon to the new collection elif len(x) == 0: #fully stripped away continue else: #If not, don't recognize the addition and continue. print 'Unfamiliar amino acid symbol or codon: ' + x continue codons_dict[symbol] = list(current_codon_collection) return codons_dict
def oauth_connect(self, provider, action):
    """
    This endpoint doesn't check if user is logged in, because it has two functions

    1. If the user is not logged in, it will try to signup the user
        - if the social info exist, it will login
        - not, it will create a new account and proceed

    2. If user is logged in, it will try to create a social login entry with
        the current user

    **** This methods doesn't save the user token, it only retrieves the ID
    to login or ID, name, email if signing up

    :param provider:
    :param action: connect|authorized|
        - connect: to connect to the endpoint
        - authorized, when coming back
    """
    valid_actions = ["connect", "authorized", "test"]
    # Logged-in users are bounced to account settings, anonymous ones to login.
    _redirect = views.auth.Account.account_settings if is_authenticated() else self.login

    # Guard: unknown action, OAuth registration disabled, or unknown provider.
    if action not in valid_actions \
            or "oauth" not in __options__.get("registration_methods") \
            or not __options__.get("allow_registration") \
            or not hasattr(oauth, provider):
        return redirect(_redirect)

    client = getattr(oauth, provider)
    params = client.__params__
    me_args = params.get("me")
    user_id = params.get("user_id")

    oauth_user_id = None
    oauth_name = None
    oauth_email = None

    if action == "test":
        # Fake a completed provider round-trip so the signup flow can be
        # exercised without a real OAuth exchange.
        session_data = {
            "provider": "ensure",
            "user_id": "1234",
            "name": "Mardix",
            "email": "mardix@email.com",
        }
        set_oauth_session(session_data)
        return redirect(url_for(self.register, oauth=1))

    if action == "connect":
        _next = request.args.get('next')
        authorized_url = url_for(self,
                                 provider=provider,
                                 action="authorized",
                                 next=_next or request.referrer or None,
                                 _external=True)
        return client.authorize(callback=authorized_url)

    elif action == "authorized":
        resp = client.authorized_response()
        if resp is None:
            pass
        elif isinstance(resp, OAuthException):
            flash_error("Access Denied")
        else:
            if not me_args:
                oauth_user_id = resp.get(user_id)
            else:
                # NOTE(review): `me` is fetched but never used, so
                # `oauth_user_id` stays None on this path and the flow below
                # falls through to the redirect -- looks incomplete; confirm
                # against the provider configs that define "me".
                me = client.get(me_args)

    if action == "authorized" and oauth_user_id:
        if is_authenticated():
            try:
                # Add federated login to current_user
                current_user.add_federated_login(provider=provider,
                                                 federated_id=oauth_user_id)
                flash_success(
                    "You can now login with your %s account" % provider.upper())
            except Exception as e:
                logging.exception(e)
            return redirect(views.auth.Account.account_settings)

        # User not logged in
        else:
            # Existing user
            user = with_federation(provider, oauth_user_id)
            if user:
                create_session(user)
                return redirect(request.args.get("next") or __options__.get(
                    "login_view"))

            # New User
            else:
                session_data = {
                    "provider": provider,
                    "user_id": oauth_user_id,
                    "name": oauth_name,
                    "email": oauth_email,
                }
                set_oauth_session(session_data)
    else:
        return redirect(_redirect)

    # Reached only on the new-user path above (oauth session was just set).
    return {
        "action": action,
        "provider": provider,
        "authorized_url": ""
    }

    # NOTE(review): unreachable -- every path above already returns.
    return redirect(_redirect)
This endpoint doesn't check if user is logged in, because it has two functions 1. If the user is not logged in, it will try to signup the user - if the social info exist, it will login - not, it will create a new account and proceed 2. If user is logged in, it will try to create a social login entry with the current user **** This methods doesn't save the user token, it only retrieves the ID to login or ID, name, email if signing up :param provider: :param action: connect|authorized| - connect: to connect to the endpoint - authorized, when coming back
Below is the the instruction that describes the task: ### Input: This endpoint doesn't check if user is logged in, because it has two functions 1. If the user is not logged in, it will try to signup the user - if the social info exist, it will login - not, it will create a new account and proceed 2. If user is logged in, it will try to create a social login entry with the current user **** This methods doesn't save the user token, it only retrieves the ID to login or ID, name, email if signing up :param provider: :param action: connect|authorized| - connect: to connect to the endpoint - authorized, when coming back ### Response: def oauth_connect(self, provider, action): """ This endpoint doesn't check if user is logged in, because it has two functions 1. If the user is not logged in, it will try to signup the user - if the social info exist, it will login - not, it will create a new account and proceed 2. If user is logged in, it will try to create a social login entry with the current user **** This methods doesn't save the user token, it only retrieves the ID to login or ID, name, email if signing up :param provider: :param action: connect|authorized| - connect: to connect to the endpoint - authorized, when coming back """ valid_actions = ["connect", "authorized", "test"] _redirect = views.auth.Account.account_settings if is_authenticated() else self.login if action not in valid_actions \ or "oauth" not in __options__.get("registration_methods") \ or not __options__.get("allow_registration") \ or not hasattr(oauth, provider): return redirect(_redirect) client = getattr(oauth, provider) params = client.__params__ me_args = params.get("me") user_id = params.get("user_id") oauth_user_id = None oauth_name = None oauth_email = None if action == "test": session_data = { "provider": "ensure", "user_id": "1234", "name": "Mardix", "email": "mardix@email.com", } set_oauth_session(session_data) return redirect(url_for(self.register, oauth=1)) if action == "connect": _next 
= request.args.get('next') authorized_url = url_for(self, provider=provider, action="authorized", next=_next or request.referrer or None, _external=True) return client.authorize(callback=authorized_url) elif action == "authorized": resp = client.authorized_response() if resp is None: pass elif isinstance(resp, OAuthException): flash_error("Access Denied") else: if not me_args: oauth_user_id = resp.get(user_id) else: me = client.get(me_args) if action == "authorized" and oauth_user_id: if is_authenticated(): try: # Add federated login to current_user current_user.add_federated_login(provider=provider, federated_id=oauth_user_id) flash_success( "You can now login with your %s account" % provider.upper()) except Exception as e: logging.exception(e) return redirect(views.auth.Account.account_settings) # User not logged in else: # Existing user user = with_federation(provider, oauth_user_id) if user: create_session(user) return redirect(request.args.get("next") or __options__.get( "login_view")) # New User else: session_data = { "provider": provider, "user_id": oauth_user_id, "name": oauth_name, "email": oauth_email, } set_oauth_session(session_data) else: return redirect(_redirect) return { "action": action, "provider": provider, "authorized_url": "" } return redirect(_redirect)