Columns: code (string, lengths 75 to 104k characters), docstring (string, lengths 1 to 46.9k characters), text (string, lengths 164 to 112k characters)
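Each row below pairs a Python function (code) with its extracted docstring and a text field that wraps both in an instruction/response prompt template. As a minimal sketch of how such a dump could be inspected, the snippet below loads a local JSON Lines export with the Hugging Face datasets library and prints per-column lengths for one record; the file name train.jsonl is a hypothetical placeholder, not part of the original dump.

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical local export of this dump; adjust the path to wherever the rows live.
ds = load_dataset("json", data_files="train.jsonl", split="train")

# Each record carries the raw function ("code"), its docstring ("docstring"),
# and the prompt-formatted pair ("text").
sample = ds[0]
for column in ("code", "docstring", "text"):
    value = sample[column]
    print("{}: {} characters".format(column, len(value)))
    print(value[:200])
```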
def add_vts(self, task_name, targets, cache_key, valid, phase): """ Add a single VersionedTargetSet entry to the report. :param InvalidationCacheManager cache_manager: :param CacheKey cache_key: :param bool valid: :param string phase: """ if task_name not in self._task_reports: self.add_task(task_name) self._task_reports[task_name].add(targets, cache_key, valid, phase)
Add a single VersionedTargetSet entry to the report. :param InvalidationCacheManager cache_manager: :param CacheKey cache_key: :param bool valid: :param string phase:
Below is the the instruction that describes the task: ### Input: Add a single VersionedTargetSet entry to the report. :param InvalidationCacheManager cache_manager: :param CacheKey cache_key: :param bool valid: :param string phase: ### Response: def add_vts(self, task_name, targets, cache_key, valid, phase): """ Add a single VersionedTargetSet entry to the report. :param InvalidationCacheManager cache_manager: :param CacheKey cache_key: :param bool valid: :param string phase: """ if task_name not in self._task_reports: self.add_task(task_name) self._task_reports[task_name].add(targets, cache_key, valid, phase)
def node_setup(name, master, ticket): ''' Setup the icinga2 node. name The domain name for which this certificate will be saved master Icinga2 master node for which this certificate will be saved ticket Authentication ticket generated on icinga2 master ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} cert = "{0}{1}.crt.orig".format(get_certs_path(), name) key = "{0}{1}.key.orig".format(get_certs_path(), name) # Checking if execution is needed. if os.path.isfile(cert) and os.path.isfile(key): ret['comment'] = 'No execution needed. Node already configured.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Node setup will be executed.' return ret # Executing the command. node_setup = __salt__['icinga2.node_setup'](name, master, ticket) if not node_setup['retcode']: ret['comment'] = "Node setup executed." ret['changes']['cert'] = "Node setup finished successfully." return ret ret['comment'] = "FAILED. Node setup failed with output: {0}".format(node_setup['stdout']) ret['result'] = False return ret
Setup the icinga2 node. name The domain name for which this certificate will be saved master Icinga2 master node for which this certificate will be saved ticket Authentication ticket generated on icinga2 master
Below is the instruction that describes the task: ### Input: Setup the icinga2 node. name The domain name for which this certificate will be saved master Icinga2 master node for which this certificate will be saved ticket Authentication ticket generated on icinga2 master ### Response: def node_setup(name, master, ticket): ''' Setup the icinga2 node. name The domain name for which this certificate will be saved master Icinga2 master node for which this certificate will be saved ticket Authentication ticket generated on icinga2 master ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} cert = "{0}{1}.crt.orig".format(get_certs_path(), name) key = "{0}{1}.key.orig".format(get_certs_path(), name) # Checking if execution is needed. if os.path.isfile(cert) and os.path.isfile(key): ret['comment'] = 'No execution needed. Node already configured.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Node setup will be executed.' return ret # Executing the command. node_setup = __salt__['icinga2.node_setup'](name, master, ticket) if not node_setup['retcode']: ret['comment'] = "Node setup executed." ret['changes']['cert'] = "Node setup finished successfully." return ret ret['comment'] = "FAILED. Node setup failed with output: {0}".format(node_setup['stdout']) ret['result'] = False return ret
def make_classes(base_class, module): """Create derived classes and put them into the same module as the base class. This function is called at the end of each of the derived class modules, acs, census, civik and tiger. It will create a set of new derived classes in the module, one for each of the entries in the `summary_levels` dict. """ from functools import partial for k in names: cls = base_class.class_factory(k.capitalize()) cls.augment() setattr(module, k.capitalize(), cls) setattr(module, 'get_class', partial(get_class, module))
Create derived classes and put them into the same module as the base class. This function is called at the end of each of the derived class modules, acs, census, civik and tiger. It will create a set of new derived classes in the module, one for each of the entries in the `summary_levels` dict.
Below is the instruction that describes the task: ### Input: Create derived classes and put them into the same module as the base class. This function is called at the end of each of the derived class modules, acs, census, civik and tiger. It will create a set of new derived classes in the module, one for each of the entries in the `summary_levels` dict. ### Response: def make_classes(base_class, module): """Create derived classes and put them into the same module as the base class. This function is called at the end of each of the derived class modules, acs, census, civik and tiger. It will create a set of new derived classes in the module, one for each of the entries in the `summary_levels` dict. """ from functools import partial for k in names: cls = base_class.class_factory(k.capitalize()) cls.augment() setattr(module, k.capitalize(), cls) setattr(module, 'get_class', partial(get_class, module))
def _read_single_point_data(self): """ Parses final free energy information from single-point calculations. """ temp_dict = read_pattern( self.text, { "final_energy": r"\s*SCF\s+energy in the final basis set\s+=\s*([\d\-\.]+)" }) if temp_dict.get('final_energy') == None: self.data['final_energy'] = None else: # -1 in case of pcm # Two lines will match the above; we want final calculation self.data['final_energy'] = float(temp_dict.get('final_energy')[-1][0])
Parses final free energy information from single-point calculations.
Below is the the instruction that describes the task: ### Input: Parses final free energy information from single-point calculations. ### Response: def _read_single_point_data(self): """ Parses final free energy information from single-point calculations. """ temp_dict = read_pattern( self.text, { "final_energy": r"\s*SCF\s+energy in the final basis set\s+=\s*([\d\-\.]+)" }) if temp_dict.get('final_energy') == None: self.data['final_energy'] = None else: # -1 in case of pcm # Two lines will match the above; we want final calculation self.data['final_energy'] = float(temp_dict.get('final_energy')[-1][0])
def set_policy(table='filter', chain=None, policy=None, family='ipv4'): ''' Set the current policy for the specified table/chain CLI Example: .. code-block:: bash salt '*' iptables.set_policy filter INPUT ACCEPT IPv6: salt '*' iptables.set_policy filter INPUT ACCEPT family=ipv6 ''' if not chain: return 'Error: Chain needs to be specified' if not policy: return 'Error: Policy needs to be specified' wait = '--wait' if _has_option('--wait', family) else '' cmd = '{0} {1} -t {2} -P {3} {4}'.format( _iptables_cmd(family), wait, table, chain, policy) out = __salt__['cmd.run'](cmd) return out
Set the current policy for the specified table/chain CLI Example: .. code-block:: bash salt '*' iptables.set_policy filter INPUT ACCEPT IPv6: salt '*' iptables.set_policy filter INPUT ACCEPT family=ipv6
Below is the the instruction that describes the task: ### Input: Set the current policy for the specified table/chain CLI Example: .. code-block:: bash salt '*' iptables.set_policy filter INPUT ACCEPT IPv6: salt '*' iptables.set_policy filter INPUT ACCEPT family=ipv6 ### Response: def set_policy(table='filter', chain=None, policy=None, family='ipv4'): ''' Set the current policy for the specified table/chain CLI Example: .. code-block:: bash salt '*' iptables.set_policy filter INPUT ACCEPT IPv6: salt '*' iptables.set_policy filter INPUT ACCEPT family=ipv6 ''' if not chain: return 'Error: Chain needs to be specified' if not policy: return 'Error: Policy needs to be specified' wait = '--wait' if _has_option('--wait', family) else '' cmd = '{0} {1} -t {2} -P {3} {4}'.format( _iptables_cmd(family), wait, table, chain, policy) out = __salt__['cmd.run'](cmd) return out
def unShare(sharedItem): """ Remove all instances of this item from public or shared view. """ sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()
Remove all instances of this item from public or shared view.
Below is the the instruction that describes the task: ### Input: Remove all instances of this item from public or shared view. ### Response: def unShare(sharedItem): """ Remove all instances of this item from public or shared view. """ sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()
def create_login_profile(user_name, password, region=None, key=None, keyid=None, profile=None): ''' Creates a login profile for the specified user, give the user the ability to access AWS services and the AWS Management Console. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.create_login_profile user_name password ''' user = get_user(user_name, region, key, keyid, profile) if not user: log.error('IAM user %s does not exist', user_name) return False conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: info = conn.create_login_profile(user_name, password) log.info('Created profile for IAM user %s.', user_name) return info except boto.exception.BotoServerError as e: log.debug(e) if 'Conflict' in e: log.info('Profile already exists for IAM user %s.', user_name) return 'Conflict' log.error('Failed to update profile for IAM user %s.', user_name) return False
Creates a login profile for the specified user, give the user the ability to access AWS services and the AWS Management Console. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.create_login_profile user_name password
Below is the the instruction that describes the task: ### Input: Creates a login profile for the specified user, give the user the ability to access AWS services and the AWS Management Console. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.create_login_profile user_name password ### Response: def create_login_profile(user_name, password, region=None, key=None, keyid=None, profile=None): ''' Creates a login profile for the specified user, give the user the ability to access AWS services and the AWS Management Console. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.create_login_profile user_name password ''' user = get_user(user_name, region, key, keyid, profile) if not user: log.error('IAM user %s does not exist', user_name) return False conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: info = conn.create_login_profile(user_name, password) log.info('Created profile for IAM user %s.', user_name) return info except boto.exception.BotoServerError as e: log.debug(e) if 'Conflict' in e: log.info('Profile already exists for IAM user %s.', user_name) return 'Conflict' log.error('Failed to update profile for IAM user %s.', user_name) return False
def get_instance(self, payload): """ Build an instance of EventInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance """ return EventInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
Build an instance of EventInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance
Below is the the instruction that describes the task: ### Input: Build an instance of EventInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance ### Response: def get_instance(self, payload): """ Build an instance of EventInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance """ return EventInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
def write_file_list_cache(opts, data, list_cache, w_lock): ''' Checks the cache file to see if there is a new enough file list cache, and returns the match (if found, along with booleans used by the fileserver backend to determine if the cache needs to be refreshed/written). ''' serial = salt.payload.Serial(opts) with salt.utils.files.fopen(list_cache, 'w+b') as fp_: fp_.write(serial.dumps(data)) _unlock_cache(w_lock) log.trace('Lockfile %s removed', w_lock)
Checks the cache file to see if there is a new enough file list cache, and returns the match (if found, along with booleans used by the fileserver backend to determine if the cache needs to be refreshed/written).
Below is the the instruction that describes the task: ### Input: Checks the cache file to see if there is a new enough file list cache, and returns the match (if found, along with booleans used by the fileserver backend to determine if the cache needs to be refreshed/written). ### Response: def write_file_list_cache(opts, data, list_cache, w_lock): ''' Checks the cache file to see if there is a new enough file list cache, and returns the match (if found, along with booleans used by the fileserver backend to determine if the cache needs to be refreshed/written). ''' serial = salt.payload.Serial(opts) with salt.utils.files.fopen(list_cache, 'w+b') as fp_: fp_.write(serial.dumps(data)) _unlock_cache(w_lock) log.trace('Lockfile %s removed', w_lock)
def convert_embedding(net, node, model, builder): """Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model An model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] outputs = node['outputs'] arg_params, aux_params = model.get_params() W = arg_params[_get_node_name(net, inputs[1][0])].asnumpy() if not ONE_HOT_ENCODE_HACK: nC, nB = W.shape W = W.T builder.add_embedding(name = name, W = W, b = None, input_dim = nC, output_channels = nB, has_bias = False, input_name = input_name, output_name = output_name) else: W = W.T nC, nB = W.shape builder.add_inner_product(name = name, W = W, b = None, input_channels = nB, output_channels = nC, has_bias = False, input_name = input_name, output_name = output_name)
Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model An model for MXNet builder: NeuralNetworkBuilder A neural network builder object.
Below is the the instruction that describes the task: ### Input: Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model An model for MXNet builder: NeuralNetworkBuilder A neural network builder object. ### Response: def convert_embedding(net, node, model, builder): """Convert an embedding layer from mxnet to coreml. Parameters ---------- net: network A mxnet network object. node: layer Node to convert. model: model An model for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] outputs = node['outputs'] arg_params, aux_params = model.get_params() W = arg_params[_get_node_name(net, inputs[1][0])].asnumpy() if not ONE_HOT_ENCODE_HACK: nC, nB = W.shape W = W.T builder.add_embedding(name = name, W = W, b = None, input_dim = nC, output_channels = nB, has_bias = False, input_name = input_name, output_name = output_name) else: W = W.T nC, nB = W.shape builder.add_inner_product(name = name, W = W, b = None, input_channels = nB, output_channels = nC, has_bias = False, input_name = input_name, output_name = output_name)
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True): """ NAME: _find_closest_trackpointaA PURPOSE: find the closest point on the stream track to a given point in frequency-angle coordinates INPUT: Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point interp= (True), if True, return the index of the interpolated track OUTPUT: index into the track of the closest track point HISTORY: 2013-12-22 - Written - Bovy (IAS) """ #Calculate angle offset along the stream parallel to the stream track, # finding first the angle among a few wraps where the point is # closest to the parallel track and then the closest trackpoint to that # point da= numpy.stack(\ numpy.meshgrid(_TWOPIWRAPS+ar-self._progenitor_angle[0], _TWOPIWRAPS+ap-self._progenitor_angle[1], _TWOPIWRAPS+az-self._progenitor_angle[2], indexing='xy')).T.reshape((len(_TWOPIWRAPS)**3,3)) dapar= self._sigMeanSign*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\ numpy.cross(da,self._dsigomeanProgDirection),axis=1))], self._dsigomeanProgDirection) if interp: dist= numpy.fabs(dapar-self._interpolatedThetasTrack) else: dist= numpy.fabs(dapar-self._thetasTrack) return numpy.argmin(dist)
NAME: _find_closest_trackpointaA PURPOSE: find the closest point on the stream track to a given point in frequency-angle coordinates INPUT: Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point interp= (True), if True, return the index of the interpolated track OUTPUT: index into the track of the closest track point HISTORY: 2013-12-22 - Written - Bovy (IAS)
Below is the the instruction that describes the task: ### Input: NAME: _find_closest_trackpointaA PURPOSE: find the closest point on the stream track to a given point in frequency-angle coordinates INPUT: Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point interp= (True), if True, return the index of the interpolated track OUTPUT: index into the track of the closest track point HISTORY: 2013-12-22 - Written - Bovy (IAS) ### Response: def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True): """ NAME: _find_closest_trackpointaA PURPOSE: find the closest point on the stream track to a given point in frequency-angle coordinates INPUT: Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point interp= (True), if True, return the index of the interpolated track OUTPUT: index into the track of the closest track point HISTORY: 2013-12-22 - Written - Bovy (IAS) """ #Calculate angle offset along the stream parallel to the stream track, # finding first the angle among a few wraps where the point is # closest to the parallel track and then the closest trackpoint to that # point da= numpy.stack(\ numpy.meshgrid(_TWOPIWRAPS+ar-self._progenitor_angle[0], _TWOPIWRAPS+ap-self._progenitor_angle[1], _TWOPIWRAPS+az-self._progenitor_angle[2], indexing='xy')).T.reshape((len(_TWOPIWRAPS)**3,3)) dapar= self._sigMeanSign*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\ numpy.cross(da,self._dsigomeanProgDirection),axis=1))], self._dsigomeanProgDirection) if interp: dist= numpy.fabs(dapar-self._interpolatedThetasTrack) else: dist= numpy.fabs(dapar-self._thetasTrack) return numpy.argmin(dist)
def send_command(self, command): """Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10') """ command += '\n' self.writer.write(command.encode(self.encoding)) yield from self.writer.drain() agi_result = yield from self._read_result() # If Asterisk returns `100 Trying...`, wait for next the response. while agi_result.get('status_code') == 100: agi_result = yield from self._read_result() # when we got AGIUsageError the following line contains some indication if 'error' in agi_result and agi_result['error'] == 'AGIUsageError': buff_usage_error = yield from self.reader.readline() agi_result['msg'] += buff_usage_error.decode(self.encoding) return agi_result
Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10')
Below is the the instruction that describes the task: ### Input: Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10') ### Response: def send_command(self, command): """Send a command for FastAGI request: :param command: Command to launch on FastAGI request. Ex: 'EXEC StartMusicOnHolds' :type command: String :Example: :: @asyncio.coroutine def call_waiting(request): print(['AGI variables:', request.headers]) yield from request.send_command('ANSWER') yield from request.send_command('EXEC StartMusicOnHold') yield from request.send_command('EXEC Wait 10') """ command += '\n' self.writer.write(command.encode(self.encoding)) yield from self.writer.drain() agi_result = yield from self._read_result() # If Asterisk returns `100 Trying...`, wait for next the response. while agi_result.get('status_code') == 100: agi_result = yield from self._read_result() # when we got AGIUsageError the following line contains some indication if 'error' in agi_result and agi_result['error'] == 'AGIUsageError': buff_usage_error = yield from self.reader.readline() agi_result['msg'] += buff_usage_error.decode(self.encoding) return agi_result
def address(self, is_compressed=None): """ Return the public address representation of this key, if available. """ return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))
Return the public address representation of this key, if available.
Below is the the instruction that describes the task: ### Input: Return the public address representation of this key, if available. ### Response: def address(self, is_compressed=None): """ Return the public address representation of this key, if available. """ return self._network.address.for_p2pkh(self.hash160(is_compressed=is_compressed))
def export_public_keys(env=None, sp=subprocess): """Export all GPG public keys.""" args = gpg_command(['--export']) result = check_output(args=args, env=env, sp=sp) if not result: raise KeyError('No GPG public keys found at env: {!r}'.format(env)) return result
Export all GPG public keys.
Below is the the instruction that describes the task: ### Input: Export all GPG public keys. ### Response: def export_public_keys(env=None, sp=subprocess): """Export all GPG public keys.""" args = gpg_command(['--export']) result = check_output(args=args, env=env, sp=sp) if not result: raise KeyError('No GPG public keys found at env: {!r}'.format(env)) return result
def read(self, filename=None, read_detection_catalog=True): """ Read a Party from a file. :type filename: str :param filename: File to read from - can be a list of files, and can contain wildcards. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not, if False, catalog will be regenerated - for large catalogs this can be faster. .. rubric:: Example >>> Party().read() Party of 4 Families. """ tribe = Tribe() families = [] if filename is None: # If there is no filename given, then read the example. filename = os.path.join(os.path.dirname(__file__), '..', 'tests', 'test_data', 'test_party.tgz') if isinstance(filename, list): filenames = [] for _filename in filename: # Expand wildcards filenames.extend(glob.glob(_filename)) else: # Expand wildcards filenames = glob.glob(filename) for _filename in filenames: with tarfile.open(_filename, "r:*") as arc: temp_dir = tempfile.mkdtemp() arc.extractall(path=temp_dir, members=_safemembers(arc)) # Read in the detections first, this way, if we read from multiple # files then we can just read in extra templates as needed. # Read in families here! party_dir = glob.glob(temp_dir + os.sep + '*')[0] tribe._read_from_folder(dirname=party_dir) det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*")) if len(det_cat_file) != 0 and read_detection_catalog: try: all_cat = read_events(det_cat_file[0]) except TypeError as e: print(e) pass else: all_cat = Catalog() for family_file in glob.glob(join(party_dir, '*_detections.csv')): template = [ t for t in tribe if _templates_match(t, family_file)] family = Family(template=template[0] or Template()) new_family = True if family.template.name in [f.template.name for f in families]: family = [ f for f in families if f.template.name == family.template.name][0] new_family = False family.detections = _read_family( fname=family_file, all_cat=all_cat, template=template[0]) if new_family: families.append(family) shutil.rmtree(temp_dir) self.families = families return self
Read a Party from a file. :type filename: str :param filename: File to read from - can be a list of files, and can contain wildcards. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not, if False, catalog will be regenerated - for large catalogs this can be faster. .. rubric:: Example >>> Party().read() Party of 4 Families.
Below is the the instruction that describes the task: ### Input: Read a Party from a file. :type filename: str :param filename: File to read from - can be a list of files, and can contain wildcards. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not, if False, catalog will be regenerated - for large catalogs this can be faster. .. rubric:: Example >>> Party().read() Party of 4 Families. ### Response: def read(self, filename=None, read_detection_catalog=True): """ Read a Party from a file. :type filename: str :param filename: File to read from - can be a list of files, and can contain wildcards. :type read_detection_catalog: bool :param read_detection_catalog: Whether to read the detection catalog or not, if False, catalog will be regenerated - for large catalogs this can be faster. .. rubric:: Example >>> Party().read() Party of 4 Families. """ tribe = Tribe() families = [] if filename is None: # If there is no filename given, then read the example. filename = os.path.join(os.path.dirname(__file__), '..', 'tests', 'test_data', 'test_party.tgz') if isinstance(filename, list): filenames = [] for _filename in filename: # Expand wildcards filenames.extend(glob.glob(_filename)) else: # Expand wildcards filenames = glob.glob(filename) for _filename in filenames: with tarfile.open(_filename, "r:*") as arc: temp_dir = tempfile.mkdtemp() arc.extractall(path=temp_dir, members=_safemembers(arc)) # Read in the detections first, this way, if we read from multiple # files then we can just read in extra templates as needed. # Read in families here! party_dir = glob.glob(temp_dir + os.sep + '*')[0] tribe._read_from_folder(dirname=party_dir) det_cat_file = glob.glob(os.path.join(party_dir, "catalog.*")) if len(det_cat_file) != 0 and read_detection_catalog: try: all_cat = read_events(det_cat_file[0]) except TypeError as e: print(e) pass else: all_cat = Catalog() for family_file in glob.glob(join(party_dir, '*_detections.csv')): template = [ t for t in tribe if _templates_match(t, family_file)] family = Family(template=template[0] or Template()) new_family = True if family.template.name in [f.template.name for f in families]: family = [ f for f in families if f.template.name == family.template.name][0] new_family = False family.detections = _read_family( fname=family_file, all_cat=all_cat, template=template[0]) if new_family: families.append(family) shutil.rmtree(temp_dir) self.families = families return self
def copy(cls, data): """Set the clipboard data ('Copy'). Parameters: data to set (string) Optional: datatype if it's not a string Returns: True / False on successful copy, Any exception raised (like passes the NSPasteboardCommunicationError) should be caught by the caller. """ pp = pprint.PrettyPrinter() copy_data = 'Data to copy (put in pasteboard): %s' logging.debug(copy_data % pp.pformat(data)) # Clear the pasteboard first: cleared = cls.clearAll() if not cleared: logging.warning('Clipboard could not clear properly') return False # Prepare to write the data # If we just use writeObjects the sequence to write to the clipboard is # a) Call clearContents() # b) Call writeObjects() with a list of objects to write to the # clipboard if not isinstance(data, types.ListType): data = [data] pb = AppKit.NSPasteboard.generalPasteboard() pb_set_ok = pb.writeObjects_(data) return bool(pb_set_ok)
Set the clipboard data ('Copy'). Parameters: data to set (string) Optional: datatype if it's not a string Returns: True / False on successful copy, Any exception raised (like passes the NSPasteboardCommunicationError) should be caught by the caller.
Below is the the instruction that describes the task: ### Input: Set the clipboard data ('Copy'). Parameters: data to set (string) Optional: datatype if it's not a string Returns: True / False on successful copy, Any exception raised (like passes the NSPasteboardCommunicationError) should be caught by the caller. ### Response: def copy(cls, data): """Set the clipboard data ('Copy'). Parameters: data to set (string) Optional: datatype if it's not a string Returns: True / False on successful copy, Any exception raised (like passes the NSPasteboardCommunicationError) should be caught by the caller. """ pp = pprint.PrettyPrinter() copy_data = 'Data to copy (put in pasteboard): %s' logging.debug(copy_data % pp.pformat(data)) # Clear the pasteboard first: cleared = cls.clearAll() if not cleared: logging.warning('Clipboard could not clear properly') return False # Prepare to write the data # If we just use writeObjects the sequence to write to the clipboard is # a) Call clearContents() # b) Call writeObjects() with a list of objects to write to the # clipboard if not isinstance(data, types.ListType): data = [data] pb = AppKit.NSPasteboard.generalPasteboard() pb_set_ok = pb.writeObjects_(data) return bool(pb_set_ok)
def encode(self): """This method encodes the data into a binary string using the appropriate algorithm specified by the mode. """ if self.mode == tables.modes['alphanumeric']: encoded = self.encode_alphanumeric() elif self.mode == tables.modes['numeric']: encoded = self.encode_numeric() elif self.mode == tables.modes['binary']: encoded = self.encode_bytes() elif self.mode == tables.modes['kanji']: encoded = self.encode_kanji() return encoded
This method encodes the data into a binary string using the appropriate algorithm specified by the mode.
Below is the the instruction that describes the task: ### Input: This method encodes the data into a binary string using the appropriate algorithm specified by the mode. ### Response: def encode(self): """This method encodes the data into a binary string using the appropriate algorithm specified by the mode. """ if self.mode == tables.modes['alphanumeric']: encoded = self.encode_alphanumeric() elif self.mode == tables.modes['numeric']: encoded = self.encode_numeric() elif self.mode == tables.modes['binary']: encoded = self.encode_bytes() elif self.mode == tables.modes['kanji']: encoded = self.encode_kanji() return encoded
def get_rejected_variables(self, threshold=0.9): """Return a list of variable names being rejected for high correlation with one of remaining variables. Parameters: ---------- threshold : float Correlation value which is above the threshold are rejected Returns ------- list The list of rejected variables or an empty list if the correlation has not been computed. """ variable_profile = self.description_set['variables'] result = [] if hasattr(variable_profile, 'correlation'): result = variable_profile.index[variable_profile.correlation > threshold].tolist() return result
Return a list of variable names being rejected for high correlation with one of remaining variables. Parameters: ---------- threshold : float Correlation value which is above the threshold are rejected Returns ------- list The list of rejected variables or an empty list if the correlation has not been computed.
Below is the the instruction that describes the task: ### Input: Return a list of variable names being rejected for high correlation with one of remaining variables. Parameters: ---------- threshold : float Correlation value which is above the threshold are rejected Returns ------- list The list of rejected variables or an empty list if the correlation has not been computed. ### Response: def get_rejected_variables(self, threshold=0.9): """Return a list of variable names being rejected for high correlation with one of remaining variables. Parameters: ---------- threshold : float Correlation value which is above the threshold are rejected Returns ------- list The list of rejected variables or an empty list if the correlation has not been computed. """ variable_profile = self.description_set['variables'] result = [] if hasattr(variable_profile, 'correlation'): result = variable_profile.index[variable_profile.correlation > threshold].tolist() return result
def __load_jco(self): """private method to set the jco attribute from a file or a matrix object """ if self.jco_arg is None: return None #raise Exception("linear_analysis.__load_jco(): jco_arg is None") if isinstance(self.jco_arg, Matrix): self.__jco = self.jco_arg elif isinstance(self.jco_arg, str): self.__jco = self.__fromfile(self.jco_arg,astype=Jco) else: raise Exception("linear_analysis.__load_jco(): jco_arg must " + "be a matrix object or a file name: " + str(self.jco_arg))
private method to set the jco attribute from a file or a matrix object
Below is the the instruction that describes the task: ### Input: private method to set the jco attribute from a file or a matrix object ### Response: def __load_jco(self): """private method to set the jco attribute from a file or a matrix object """ if self.jco_arg is None: return None #raise Exception("linear_analysis.__load_jco(): jco_arg is None") if isinstance(self.jco_arg, Matrix): self.__jco = self.jco_arg elif isinstance(self.jco_arg, str): self.__jco = self.__fromfile(self.jco_arg,astype=Jco) else: raise Exception("linear_analysis.__load_jco(): jco_arg must " + "be a matrix object or a file name: " + str(self.jco_arg))
def validate_generations_for_story_elements( sender, instance, action, target_node_type=None, target_node=None, pos=None, *args, **kwargs ): ''' Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced. ''' if action == 'add_child': if instance.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, instance.story_element_type))) if action == 'update': parent = instance.get_parent() children = instance.get_children() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if children: for child in children: if target_node_type not in STORY_NODE_ELEMENT_DEFINITIONS[child.story_element_type]['allowed_parents']: raise IntegrityError(_('%s is not permitted to be a parent of %s' % ( target_node_type, child.story_element_type))) if action == 'add_sibling': parent = instance.get_parent() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if action == 'move': if not pos or 'sibling' in pos or 'right' in pos or 'left' in pos: parent = target_node.get_parent() if (parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, parent.story_element_type ))) if 'child' in pos: if (target_node.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, target_node.story_element_type )))
Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced.
Below is the the instruction that describes the task: ### Input: Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced. ### Response: def validate_generations_for_story_elements( sender, instance, action, target_node_type=None, target_node=None, pos=None, *args, **kwargs ): ''' Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced. ''' if action == 'add_child': if instance.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, instance.story_element_type))) if action == 'update': parent = instance.get_parent() children = instance.get_children() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if children: for child in children: if target_node_type not in STORY_NODE_ELEMENT_DEFINITIONS[child.story_element_type]['allowed_parents']: raise IntegrityError(_('%s is not permitted to be a parent of %s' % ( target_node_type, child.story_element_type))) if action == 'add_sibling': parent = instance.get_parent() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if action == 'move': if not pos or 'sibling' in pos or 'right' in pos or 'left' in pos: parent = target_node.get_parent() if (parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, parent.story_element_type ))) if 'child' in pos: if (target_node.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, target_node.story_element_type )))
def restore(self): """ Restore the 'output/' directory structure based on the `dir_structure.json` file. """ # We get the structure we have to create/apply. structure = self._get_structure() # We get the list of key which is implicitly the list of directory to recreate. list_of_key = list(structure.keys()) # We move to the content of the parent as we know that we are creating only one directory. # Note: if one day we will have to create multiple directory, we will have to change # the following. structure = structure[list_of_key[0]] # We also set the parent directory as we are going to construct its childen. parent_path = list_of_key[0] if not parent_path.endswith(PyFunceble.directory_separator): parent_path += PyFunceble.directory_separator # We get if we have to replace `.gitignore` to `.keep` and versa. replacement_status = self._restore_replace() for directory in structure: # We loop through the list of directory to create. # We construct the full path. base = self.base + parent_path + directory if not base.endswith(PyFunceble.directory_separator): base += PyFunceble.directory_separator # We create the constructed path if it does not exist. self._create_directory(base) for file in structure[directory]: # We loop through the list of files in the currently read directory. # We construct the full file path.s file_path = base + file # We get the file content. content_to_write = structure[directory][file]["content"] # And its sha512 checksum. online_sha = structure[directory][file]["sha512"] # We update the content to write by replacing our glue with `\n`. content_to_write = Regex( content_to_write, "@@@", escape=True, replace_with="\\n" ).replace() # We get the file path as .keep. git_to_keep = file_path.replace("gitignore", "keep") # We get the file path as .gitignore. keep_to_git = file_path.replace("keep", "gitignore") if replacement_status: # We have to replace every .gitignore to .keep. if ( PyFunceble.path.isfile(file_path) and Hash(file_path, "sha512", True).get() == online_sha ): # * The currently read file exist. # and # * Its sha512sum is equal to the one we have in our structure. # We rename the file. PyFunceble.rename(file_path, git_to_keep) # And we disallow the file writing. write = False else: # * The currently read file does not exist. # or # * Its sha512sum is not equal to the one we have in our structure. # We delere the file if it does exist. File(file_path).delete() # We update the file path. file_path = git_to_keep # And we allow the file writing. write = True else: # We have to replace every .keep to .gitignore. if ( PyFunceble.path.isfile(keep_to_git) and Hash(file_path, "sha512", True).get() == online_sha ): # * The .keep file exist. # and # * Its sha512sum is equal to the one we have in our structure. # We rename the file. PyFunceble.rename(file_path, keep_to_git) # And we disallow the file writing. write = False else: # * The .keep file does not exist. # or # * Its sha512sum is not equal to the one we have in our structure. # We delete the file if it exist. File(keep_to_git).delete() # We update the file path file_path = keep_to_git # And we allow the file writing. write = True if write: # The file writing is allowed. # We write our file content into the file path. File(file_path).write(content_to_write + "\n", True) self.delete_uneeded()
Restore the 'output/' directory structure based on the `dir_structure.json` file.
Below is the the instruction that describes the task: ### Input: Restore the 'output/' directory structure based on the `dir_structure.json` file. ### Response: def restore(self): """ Restore the 'output/' directory structure based on the `dir_structure.json` file. """ # We get the structure we have to create/apply. structure = self._get_structure() # We get the list of key which is implicitly the list of directory to recreate. list_of_key = list(structure.keys()) # We move to the content of the parent as we know that we are creating only one directory. # Note: if one day we will have to create multiple directory, we will have to change # the following. structure = structure[list_of_key[0]] # We also set the parent directory as we are going to construct its childen. parent_path = list_of_key[0] if not parent_path.endswith(PyFunceble.directory_separator): parent_path += PyFunceble.directory_separator # We get if we have to replace `.gitignore` to `.keep` and versa. replacement_status = self._restore_replace() for directory in structure: # We loop through the list of directory to create. # We construct the full path. base = self.base + parent_path + directory if not base.endswith(PyFunceble.directory_separator): base += PyFunceble.directory_separator # We create the constructed path if it does not exist. self._create_directory(base) for file in structure[directory]: # We loop through the list of files in the currently read directory. # We construct the full file path.s file_path = base + file # We get the file content. content_to_write = structure[directory][file]["content"] # And its sha512 checksum. online_sha = structure[directory][file]["sha512"] # We update the content to write by replacing our glue with `\n`. content_to_write = Regex( content_to_write, "@@@", escape=True, replace_with="\\n" ).replace() # We get the file path as .keep. git_to_keep = file_path.replace("gitignore", "keep") # We get the file path as .gitignore. keep_to_git = file_path.replace("keep", "gitignore") if replacement_status: # We have to replace every .gitignore to .keep. if ( PyFunceble.path.isfile(file_path) and Hash(file_path, "sha512", True).get() == online_sha ): # * The currently read file exist. # and # * Its sha512sum is equal to the one we have in our structure. # We rename the file. PyFunceble.rename(file_path, git_to_keep) # And we disallow the file writing. write = False else: # * The currently read file does not exist. # or # * Its sha512sum is not equal to the one we have in our structure. # We delere the file if it does exist. File(file_path).delete() # We update the file path. file_path = git_to_keep # And we allow the file writing. write = True else: # We have to replace every .keep to .gitignore. if ( PyFunceble.path.isfile(keep_to_git) and Hash(file_path, "sha512", True).get() == online_sha ): # * The .keep file exist. # and # * Its sha512sum is equal to the one we have in our structure. # We rename the file. PyFunceble.rename(file_path, keep_to_git) # And we disallow the file writing. write = False else: # * The .keep file does not exist. # or # * Its sha512sum is not equal to the one we have in our structure. # We delete the file if it exist. File(keep_to_git).delete() # We update the file path file_path = keep_to_git # And we allow the file writing. write = True if write: # The file writing is allowed. # We write our file content into the file path. File(file_path).write(content_to_write + "\n", True) self.delete_uneeded()
def get_tripIs_within_range_by_dsut(self, start_time_ut, end_time_ut): """ Obtain a list of trip_Is that take place during a time interval. The trip needs to be only partially overlapping with the given time interval. The grouping by dsut (day_start_ut) is required as same trip_I could take place on multiple days. Parameters ---------- start_time_ut : int start of the time interval in unix time (seconds) end_time_ut: int end of the time interval in unix time (seconds) Returns ------- trip_I_dict: dict keys: day_start_times to list of integers (trip_Is) """ cur = self.conn.cursor() assert start_time_ut <= end_time_ut dst_ut, st_ds, et_ds = \ self._get_possible_day_starts(start_time_ut, end_time_ut, 7) # noinspection PyTypeChecker assert len(dst_ut) >= 0 trip_I_dict = {} for day_start_ut, start_ds, end_ds in \ zip(dst_ut, st_ds, et_ds): query = """ SELECT distinct(trip_I) FROM days JOIN trips USING(trip_I) WHERE (days.day_start_ut == ?) AND ( (trips.start_time_ds <= ?) AND (trips.end_time_ds >= ?) ) """ params = (day_start_ut, end_ds, start_ds) trip_Is = [el[0] for el in cur.execute(query, params)] if len(trip_Is) > 0: trip_I_dict[day_start_ut] = trip_Is return trip_I_dict
Obtain a list of trip_Is that take place during a time interval. The trip needs to be only partially overlapping with the given time interval. The grouping by dsut (day_start_ut) is required as same trip_I could take place on multiple days. Parameters ---------- start_time_ut : int start of the time interval in unix time (seconds) end_time_ut: int end of the time interval in unix time (seconds) Returns ------- trip_I_dict: dict keys: day_start_times to list of integers (trip_Is)
Below is the the instruction that describes the task: ### Input: Obtain a list of trip_Is that take place during a time interval. The trip needs to be only partially overlapping with the given time interval. The grouping by dsut (day_start_ut) is required as same trip_I could take place on multiple days. Parameters ---------- start_time_ut : int start of the time interval in unix time (seconds) end_time_ut: int end of the time interval in unix time (seconds) Returns ------- trip_I_dict: dict keys: day_start_times to list of integers (trip_Is) ### Response: def get_tripIs_within_range_by_dsut(self, start_time_ut, end_time_ut): """ Obtain a list of trip_Is that take place during a time interval. The trip needs to be only partially overlapping with the given time interval. The grouping by dsut (day_start_ut) is required as same trip_I could take place on multiple days. Parameters ---------- start_time_ut : int start of the time interval in unix time (seconds) end_time_ut: int end of the time interval in unix time (seconds) Returns ------- trip_I_dict: dict keys: day_start_times to list of integers (trip_Is) """ cur = self.conn.cursor() assert start_time_ut <= end_time_ut dst_ut, st_ds, et_ds = \ self._get_possible_day_starts(start_time_ut, end_time_ut, 7) # noinspection PyTypeChecker assert len(dst_ut) >= 0 trip_I_dict = {} for day_start_ut, start_ds, end_ds in \ zip(dst_ut, st_ds, et_ds): query = """ SELECT distinct(trip_I) FROM days JOIN trips USING(trip_I) WHERE (days.day_start_ut == ?) AND ( (trips.start_time_ds <= ?) AND (trips.end_time_ds >= ?) ) """ params = (day_start_ut, end_ds, start_ds) trip_Is = [el[0] for el in cur.execute(query, params)] if len(trip_Is) > 0: trip_I_dict[day_start_ut] = trip_Is return trip_I_dict
def is_existing_up_to_date(destination, latest_version): """Returns False if there is no existing install or if the existing install is out of date. Otherwise, returns True.""" version_path = os.path.join( destination, 'google_appengine', 'VERSION') if not os.path.exists(version_path): return False with open(version_path, 'r') as f: version_line = f.readline() match = SDK_RELEASE_RE.match(version_line) if not match: print('Unable to parse version from:', version_line) return False version = [int(x) for x in match.groups()] return version >= latest_version
Returns False if there is no existing install or if the existing install is out of date. Otherwise, returns True.
Below is the the instruction that describes the task: ### Input: Returns False if there is no existing install or if the existing install is out of date. Otherwise, returns True. ### Response: def is_existing_up_to_date(destination, latest_version): """Returns False if there is no existing install or if the existing install is out of date. Otherwise, returns True.""" version_path = os.path.join( destination, 'google_appengine', 'VERSION') if not os.path.exists(version_path): return False with open(version_path, 'r') as f: version_line = f.readline() match = SDK_RELEASE_RE.match(version_line) if not match: print('Unable to parse version from:', version_line) return False version = [int(x) for x in match.groups()] return version >= latest_version
def get_protocol_sequence(self,sweep): """ given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. """ self.setsweep(sweep) return list(self.protoSeqX),list(self.protoSeqY)
given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers.
Below is the the instruction that describes the task: ### Input: given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. ### Response: def get_protocol_sequence(self,sweep): """ given a sweep, return the protocol as condensed sequence. This is better for comparing similarities and determining steps. There should be no duplicate numbers. """ self.setsweep(sweep) return list(self.protoSeqX),list(self.protoSeqY)
def at_depth(self, level): """ Locate the last config item at a specified depth """ return Zconfig(lib.zconfig_at_depth(self._as_parameter_, level), False)
Locate the last config item at a specified depth
Below is the the instruction that describes the task: ### Input: Locate the last config item at a specified depth ### Response: def at_depth(self, level): """ Locate the last config item at a specified depth """ return Zconfig(lib.zconfig_at_depth(self._as_parameter_, level), False)
def delete_plate(self, name): '''Deletes a plate. Parameters ---------- name: str name of the plate that should be deleted See also -------- :func:`tmserver.api.plate.delete_plate` :class:`tmlib.models.plate.Plate` ''' logger.info( 'delete plate "%s" of experiment "%s"', name, self.experiment_name ) plate_id = self._get_plate_id(name) url = self._build_api_url( '/experiments/{experiment_id}/plates/{plate_id}'.format( experiment_id=self._experiment_id, plate_id=plate_id ) ) res = self._session.delete(url) res.raise_for_status()
Deletes a plate. Parameters ---------- name: str name of the plate that should be deleted See also -------- :func:`tmserver.api.plate.delete_plate` :class:`tmlib.models.plate.Plate`
Below is the the instruction that describes the task: ### Input: Deletes a plate. Parameters ---------- name: str name of the plate that should be deleted See also -------- :func:`tmserver.api.plate.delete_plate` :class:`tmlib.models.plate.Plate` ### Response: def delete_plate(self, name): '''Deletes a plate. Parameters ---------- name: str name of the plate that should be deleted See also -------- :func:`tmserver.api.plate.delete_plate` :class:`tmlib.models.plate.Plate` ''' logger.info( 'delete plate "%s" of experiment "%s"', name, self.experiment_name ) plate_id = self._get_plate_id(name) url = self._build_api_url( '/experiments/{experiment_id}/plates/{plate_id}'.format( experiment_id=self._experiment_id, plate_id=plate_id ) ) res = self._session.delete(url) res.raise_for_status()
def gen_multi_key(self, values, db='default'): """Takes a list of generations (not table keys) and returns a key.""" db = settings.DB_CACHE_KEYS[db] if db and len(db) > 100: db = db[0:68] + self.gen_key(db[68:]) return '%s_%s_multi_%s' % (self.prefix, db, self.gen_key(*values))
Takes a list of generations (not table keys) and returns a key.
Below is the the instruction that describes the task: ### Input: Takes a list of generations (not table keys) and returns a key. ### Response: def gen_multi_key(self, values, db='default'): """Takes a list of generations (not table keys) and returns a key.""" db = settings.DB_CACHE_KEYS[db] if db and len(db) > 100: db = db[0:68] + self.gen_key(db[68:]) return '%s_%s_multi_%s' % (self.prefix, db, self.gen_key(*values))
def command(state, args): """Unregister watching regexp for an anime.""" args = parser.parse_args(args[1:]) if args.complete: query.files.delete_regexp_complete(state.db) else: if args.aid is None: parser.print_help() else: aid = state.results.parse_aid(args.aid, default_key='db') query.files.delete_regexp(state.db, aid)
Unregister watching regexp for an anime.
Below is the the instruction that describes the task: ### Input: Unregister watching regexp for an anime. ### Response: def command(state, args): """Unregister watching regexp for an anime.""" args = parser.parse_args(args[1:]) if args.complete: query.files.delete_regexp_complete(state.db) else: if args.aid is None: parser.print_help() else: aid = state.results.parse_aid(args.aid, default_key='db') query.files.delete_regexp(state.db, aid)
def ylim(self, low, high, index=1): """Set yaxis limits. Parameters ---------- low : number high : number index : int, optional Returns ------- Chart """ self.layout['yaxis' + str(index)]['range'] = [low, high] return self
Set yaxis limits. Parameters ---------- low : number high : number index : int, optional Returns ------- Chart
Below is the the instruction that describes the task: ### Input: Set yaxis limits. Parameters ---------- low : number high : number index : int, optional Returns ------- Chart ### Response: def ylim(self, low, high, index=1): """Set yaxis limits. Parameters ---------- low : number high : number index : int, optional Returns ------- Chart """ self.layout['yaxis' + str(index)]['range'] = [low, high] return self
def _rename_hstore_required(self, old_table_name, new_table_name, old_field, new_field, key): """Renames an existing REQUIRED CONSTRAINT for the specified hstore key.""" old_name = self._required_constraint_name( old_table_name, old_field, key) new_name = self._required_constraint_name( new_table_name, new_field, key) sql = self.sql_hstore_required_rename.format( table=self.quote_name(new_table_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name) ) self.execute(sql)
Renames an existing REQUIRED CONSTRAINT for the specified hstore key.
Below is the the instruction that describes the task: ### Input: Renames an existing REQUIRED CONSTRAINT for the specified hstore key. ### Response: def _rename_hstore_required(self, old_table_name, new_table_name, old_field, new_field, key): """Renames an existing REQUIRED CONSTRAINT for the specified hstore key.""" old_name = self._required_constraint_name( old_table_name, old_field, key) new_name = self._required_constraint_name( new_table_name, new_field, key) sql = self.sql_hstore_required_rename.format( table=self.quote_name(new_table_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name) ) self.execute(sql)
def main():
    """Parse the command-line arguments and run the bot."""
    parser = argparse.ArgumentParser(description = 'XMPP echo bot',
                                     parents = [XMPPSettings.get_arg_parser()])
    parser.add_argument('jid', metavar = 'JID',
                        help = 'The bot JID')
    parser.add_argument('--debug',
                        action = 'store_const', dest = 'log_level',
                        const = logging.DEBUG, default = logging.INFO,
                        help = 'Print debug messages')
    parser.add_argument('--quiet', const = logging.ERROR,
                        action = 'store_const', dest = 'log_level',
                        help = 'Print only error messages')
    parser.add_argument('--trace', action = 'store_true',
                        help = 'Print XML data sent and received')
    args = parser.parse_args()
    settings = XMPPSettings({
                            "software_name": "Echo Bot"
                        })
    settings.load_arguments(args)
    if settings.get("password") is None:
        password = getpass("{0!r} password: ".format(args.jid))
        if sys.version_info.major < 3:
            password = password.decode("utf-8")
        settings["password"] = password
    if sys.version_info.major < 3:
        args.jid = args.jid.decode("utf-8")
    logging.basicConfig(level = args.log_level)
    if args.trace:
        print("enabling trace")
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
            logger = logging.getLogger(logger)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.propagate = False
    bot = EchoBot(JID(args.jid), settings)
    try:
        bot.run()
    except KeyboardInterrupt:
        bot.disconnect()
Parse the command-line arguments and run the bot.
Below is the the instruction that describes the task:
### Input:
Parse the command-line arguments and run the bot.
### Response:
def main():
    """Parse the command-line arguments and run the bot."""
    parser = argparse.ArgumentParser(description = 'XMPP echo bot',
                                     parents = [XMPPSettings.get_arg_parser()])
    parser.add_argument('jid', metavar = 'JID',
                        help = 'The bot JID')
    parser.add_argument('--debug',
                        action = 'store_const', dest = 'log_level',
                        const = logging.DEBUG, default = logging.INFO,
                        help = 'Print debug messages')
    parser.add_argument('--quiet', const = logging.ERROR,
                        action = 'store_const', dest = 'log_level',
                        help = 'Print only error messages')
    parser.add_argument('--trace', action = 'store_true',
                        help = 'Print XML data sent and received')
    args = parser.parse_args()
    settings = XMPPSettings({
                            "software_name": "Echo Bot"
                        })
    settings.load_arguments(args)
    if settings.get("password") is None:
        password = getpass("{0!r} password: ".format(args.jid))
        if sys.version_info.major < 3:
            password = password.decode("utf-8")
        settings["password"] = password
    if sys.version_info.major < 3:
        args.jid = args.jid.decode("utf-8")
    logging.basicConfig(level = args.log_level)
    if args.trace:
        print("enabling trace")
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
            logger = logging.getLogger(logger)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.propagate = False
    bot = EchoBot(JID(args.jid), settings)
    try:
        bot.run()
    except KeyboardInterrupt:
        bot.disconnect()
def by_id(cls, id, conn=None, google_user=None, google_password=None):
        """ Open a spreadsheet via its resource ID. This is more
        precise than opening a document by title, and should be
        preferred. """
        conn = Connection.connect(conn=conn, google_user=google_user,
                                  google_password=google_password)
        return cls(id=id, conn=conn)
Open a spreadsheet via its resource ID. This is more precise than opening a document by title, and should be preferred.
Below is the the instruction that describes the task:
### Input:
Open a spreadsheet via its resource ID. This is more precise than opening a document by title, and should be preferred.
### Response:
def by_id(cls, id, conn=None, google_user=None, google_password=None):
        """ Open a spreadsheet via its resource ID. This is more
        precise than opening a document by title, and should be
        preferred. """
        conn = Connection.connect(conn=conn, google_user=google_user,
                                  google_password=google_password)
        return cls(id=id, conn=conn)
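A minimal sketch of opening a document by its resource ID, assuming the enclosing class is named Spreadsheet; the ID and credentials below are placeholders:

sheet = Spreadsheet.by_id('1AbCdEfGhIjK',
                          google_user='user@example.com',
                          google_password='app-password')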
def _make_A_and_part_of_b_adjacent(self, ref_crds): """ Make A and part of b. See docstring of this class for answer to "What are A and b?" """ rot = self._rotate_rows(ref_crds) A = 2*(rot - ref_crds) partial_b = (rot**2 - ref_crds**2).sum(1) return A, partial_b
Make A and part of b. See docstring of this class for answer to "What are A and b?"
Below is the the instruction that describes the task: ### Input: Make A and part of b. See docstring of this class for answer to "What are A and b?" ### Response: def _make_A_and_part_of_b_adjacent(self, ref_crds): """ Make A and part of b. See docstring of this class for answer to "What are A and b?" """ rot = self._rotate_rows(ref_crds) A = 2*(rot - ref_crds) partial_b = (rot**2 - ref_crds**2).sum(1) return A, partial_b
def process_route_spec_config(con, vpc_info, route_spec, failed_ips, questionable_ips): """ Look through the route spec and update routes accordingly. Idea: Make sure we have a route for each CIDR. If we have a route to any of the IP addresses for a given CIDR then we are good. Otherwise, pick one (usually the first) IP and create a route to that IP. If a route points at a failed or questionable IP then a new candidate is chosen, if possible. """ if CURRENT_STATE._stop_all: logging.debug("Routespec processing. Stop requested, abort operation") return if failed_ips: logging.debug("Route spec processing. Failed IPs: %s" % ",".join(failed_ips)) else: logging.debug("Route spec processing. No failed IPs.") # Iterate over all the routes in the VPC, check they are contained in # the spec, update the routes as needed. # Need to remember the routes we saw in different RTs, so that we can later # add them, if needed. routes_in_rts = {} CURRENT_STATE.vpc_state.setdefault("time", datetime.datetime.now().isoformat()) # Passed through the functions and filled in, state accumulates information # about all the routes we encounted in the VPC and what we are doing with # them. This is then available in the CURRENT_STATE chosen_routers = _update_existing_routes(route_spec, failed_ips, questionable_ips, vpc_info, con, routes_in_rts) # Now go over all the routes in the spec and add those that aren't in VPC, # yet. _add_missing_routes(route_spec, failed_ips, questionable_ips, chosen_routers, vpc_info, con, routes_in_rts)
Look through the route spec and update routes accordingly. Idea: Make sure we have a route for each CIDR. If we have a route to any of the IP addresses for a given CIDR then we are good. Otherwise, pick one (usually the first) IP and create a route to that IP. If a route points at a failed or questionable IP then a new candidate is chosen, if possible.
Below is the the instruction that describes the task: ### Input: Look through the route spec and update routes accordingly. Idea: Make sure we have a route for each CIDR. If we have a route to any of the IP addresses for a given CIDR then we are good. Otherwise, pick one (usually the first) IP and create a route to that IP. If a route points at a failed or questionable IP then a new candidate is chosen, if possible. ### Response: def process_route_spec_config(con, vpc_info, route_spec, failed_ips, questionable_ips): """ Look through the route spec and update routes accordingly. Idea: Make sure we have a route for each CIDR. If we have a route to any of the IP addresses for a given CIDR then we are good. Otherwise, pick one (usually the first) IP and create a route to that IP. If a route points at a failed or questionable IP then a new candidate is chosen, if possible. """ if CURRENT_STATE._stop_all: logging.debug("Routespec processing. Stop requested, abort operation") return if failed_ips: logging.debug("Route spec processing. Failed IPs: %s" % ",".join(failed_ips)) else: logging.debug("Route spec processing. No failed IPs.") # Iterate over all the routes in the VPC, check they are contained in # the spec, update the routes as needed. # Need to remember the routes we saw in different RTs, so that we can later # add them, if needed. routes_in_rts = {} CURRENT_STATE.vpc_state.setdefault("time", datetime.datetime.now().isoformat()) # Passed through the functions and filled in, state accumulates information # about all the routes we encounted in the VPC and what we are doing with # them. This is then available in the CURRENT_STATE chosen_routers = _update_existing_routes(route_spec, failed_ips, questionable_ips, vpc_info, con, routes_in_rts) # Now go over all the routes in the spec and add those that aren't in VPC, # yet. _add_missing_routes(route_spec, failed_ips, questionable_ips, chosen_routers, vpc_info, con, routes_in_rts)
def get(self, max=None, timeout=None, wait=None): """Deprecated. Use Queue.reserve() instead. Executes an HTTP request to get a message off of a queue. Keyword arguments: max -- The maximum number of messages to pull. Defaults to 1. """ response = self.reserve(max, timeout, wait) return response
Deprecated. Use Queue.reserve() instead. Executes an HTTP request to get a message off of a queue. Keyword arguments: max -- The maximum number of messages to pull. Defaults to 1.
Below is the the instruction that describes the task: ### Input: Deprecated. Use Queue.reserve() instead. Executes an HTTP request to get a message off of a queue. Keyword arguments: max -- The maximum number of messages to pull. Defaults to 1. ### Response: def get(self, max=None, timeout=None, wait=None): """Deprecated. Use Queue.reserve() instead. Executes an HTTP request to get a message off of a queue. Keyword arguments: max -- The maximum number of messages to pull. Defaults to 1. """ response = self.reserve(max, timeout, wait) return response
def save_json_file( file, val, pretty=False, compact=True, sort=True, encoder=None ): """ Save data to json file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | list | dict :param pretty: Format data to be readable (default: False) :type pretty: bool :param compact: Format data to be compact (default: True) :type compact: bool :param sort: Sort keys (default: True) :type sort: bool :param encoder: Use custom json encoder :type encoder: T <= DateTimeEncoder :rtype: None """ # TODO: make pretty/compact into one bool? if encoder is None: encoder = DateTimeEncoder opened = False if not hasattr(file, "write"): file = io.open(file, "w", encoding="utf-8") opened = True try: if pretty: data = json.dumps( val, indent=4, separators=(',', ': '), sort_keys=sort, cls=encoder ) elif compact: data = json.dumps( val, separators=(',', ':'), sort_keys=sort, cls=encoder ) else: data = json.dumps(val, sort_keys=sort, cls=encoder) if not sys.version_info > (3, 0) and isinstance(data, str): data = data.decode("utf-8") file.write(data) finally: if opened: file.close()
Save data to json file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | list | dict :param pretty: Format data to be readable (default: False) :type pretty: bool :param compact: Format data to be compact (default: True) :type compact: bool :param sort: Sort keys (default: True) :type sort: bool :param encoder: Use custom json encoder :type encoder: T <= DateTimeEncoder :rtype: None
Below is the the instruction that describes the task: ### Input: Save data to json file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | list | dict :param pretty: Format data to be readable (default: False) :type pretty: bool :param compact: Format data to be compact (default: True) :type compact: bool :param sort: Sort keys (default: True) :type sort: bool :param encoder: Use custom json encoder :type encoder: T <= DateTimeEncoder :rtype: None ### Response: def save_json_file( file, val, pretty=False, compact=True, sort=True, encoder=None ): """ Save data to json file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | list | dict :param pretty: Format data to be readable (default: False) :type pretty: bool :param compact: Format data to be compact (default: True) :type compact: bool :param sort: Sort keys (default: True) :type sort: bool :param encoder: Use custom json encoder :type encoder: T <= DateTimeEncoder :rtype: None """ # TODO: make pretty/compact into one bool? if encoder is None: encoder = DateTimeEncoder opened = False if not hasattr(file, "write"): file = io.open(file, "w", encoding="utf-8") opened = True try: if pretty: data = json.dumps( val, indent=4, separators=(',', ': '), sort_keys=sort, cls=encoder ) elif compact: data = json.dumps( val, separators=(',', ':'), sort_keys=sort, cls=encoder ) else: data = json.dumps(val, sort_keys=sort, cls=encoder) if not sys.version_info > (3, 0) and isinstance(data, str): data = data.decode("utf-8") file.write(data) finally: if opened: file.close()
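A usage sketch for save_json_file, relying only on what the function above shows (path-or-file argument, pretty/compact flags, the module's DateTimeEncoder handling datetimes):

import datetime
data = {"created": datetime.datetime.utcnow(), "items": [3, 1, 2]}
save_json_file("out.json", data, pretty=True)   # indented output, keys sorted
save_json_file("out.min.json", data)            # compact separators, keys sorted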
def calculate_path(self, remote_relative_path, input_type):
        """ Only for use by the Pulsar client; managers should override this to
        enforce security and to create the directory if needed.
        """
        directory, allow_nested_files = self._directory_for_file_type(input_type)
        return self.path_helper.remote_join(directory, remote_relative_path)
Only for use by the Pulsar client; managers should override this to enforce security and to create the directory if needed.
Below is the the instruction that describes the task:
### Input:
Only for use by the Pulsar client; managers should override this to enforce security and to create the directory if needed.
### Response:
def calculate_path(self, remote_relative_path, input_type):
        """ Only for use by the Pulsar client; managers should override this to
        enforce security and to create the directory if needed.
        """
        directory, allow_nested_files = self._directory_for_file_type(input_type)
        return self.path_helper.remote_join(directory, remote_relative_path)
def list_users(self): """ List all users. """ sql = "SELECT * FROM user ORDER BY username" self._db_curs.execute(sql) users = list() for row in self._db_curs: users.append(dict(row)) return users
List all users.
Below is the the instruction that describes the task: ### Input: List all users. ### Response: def list_users(self): """ List all users. """ sql = "SELECT * FROM user ORDER BY username" self._db_curs.execute(sql) users = list() for row in self._db_curs: users.append(dict(row)) return users
def update_members(self, members, append_lists=False, remove_members=False): """ Update group members with member list. Set append=True to append to existing members, or append=False to overwrite. :param list members: new members for group by href or Element :type members: list[str, Element] :param bool append_lists: whether to append :param bool remove_members: remove members from the group :return: bool was modified or not """ if members: elements = [element_resolver(element) for element in members] if remove_members: element = [e for e in self.members if e not in elements] if set(element) == set(self.members): remove_members = element = False append_lists = False elif append_lists: element = [e for e in elements if e not in self.members] else: element = list(set(elements)) if element or remove_members: self.update( element=element, append_lists=append_lists) return True return False
Update group members with member list. Set append=True to append to existing members, or append=False to overwrite. :param list members: new members for group by href or Element :type members: list[str, Element] :param bool append_lists: whether to append :param bool remove_members: remove members from the group :return: bool was modified or not
Below is the the instruction that describes the task: ### Input: Update group members with member list. Set append=True to append to existing members, or append=False to overwrite. :param list members: new members for group by href or Element :type members: list[str, Element] :param bool append_lists: whether to append :param bool remove_members: remove members from the group :return: bool was modified or not ### Response: def update_members(self, members, append_lists=False, remove_members=False): """ Update group members with member list. Set append=True to append to existing members, or append=False to overwrite. :param list members: new members for group by href or Element :type members: list[str, Element] :param bool append_lists: whether to append :param bool remove_members: remove members from the group :return: bool was modified or not """ if members: elements = [element_resolver(element) for element in members] if remove_members: element = [e for e in self.members if e not in elements] if set(element) == set(self.members): remove_members = element = False append_lists = False elif append_lists: element = [e for e in elements if e not in self.members] else: element = list(set(elements)) if element or remove_members: self.update( element=element, append_lists=append_lists) return True return False
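A hedged usage sketch for update_members; the Group and Host constructions are placeholders for whatever element types the surrounding library provides:

group = Group('web-servers')                                    # hypothetical existing group
if group.update_members([Host('web-3')], append_lists=True):    # add without overwriting
    print('group was modified')
group.update_members([Host('web-1')], remove_members=True)      # drop a member if present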
def set_stdev(self, col, row, stdev): """ Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float """ javabridge.call(self.jobject, "setStdDev", "(IID)V", col, row, stdev)
Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float
Below is the the instruction that describes the task: ### Input: Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float ### Response: def set_stdev(self, col, row, stdev): """ Sets the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :param stdev: the standard deviation to set :type stdev: float """ javabridge.call(self.jobject, "setStdDev", "(IID)V", col, row, stdev)
def post(self, path, args, wait=False): """POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str """ uri = self.make_uri(path) timeout = self.make_timeout(wait) for (key, value) in list(args.items()): args[key] = serialize(value, True) try: logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args) rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl) logger.debug("got: %d - %s", rsp.status_code, rsp.text) if rsp.status_code != 200: raise HTTPClientDataException(rsp.status_code, rsp.text, uri) return rsp.content except (requests.Timeout, requests.ConnectTimeout): raise HTTPClientTimeoutException(timeout, uri) except requests.ConnectionError as exp: raise HTTPClientConnectionException(uri, exp.args[0]) except Exception as exp: raise HTTPClientException('Request error to %s: %s' % (uri, exp))
POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str
Below is the the instruction that describes the task: ### Input: POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str ### Response: def post(self, path, args, wait=False): """POST an HTTP request to a daemon :param path: path to do the request :type path: str :param args: args to add in the request :type args: dict :param wait: True for a long timeout :type wait: bool :return: Content of the HTTP response if server returned 200 :rtype: str """ uri = self.make_uri(path) timeout = self.make_timeout(wait) for (key, value) in list(args.items()): args[key] = serialize(value, True) try: logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args) rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl) logger.debug("got: %d - %s", rsp.status_code, rsp.text) if rsp.status_code != 200: raise HTTPClientDataException(rsp.status_code, rsp.text, uri) return rsp.content except (requests.Timeout, requests.ConnectTimeout): raise HTTPClientTimeoutException(timeout, uri) except requests.ConnectionError as exp: raise HTTPClientConnectionException(uri, exp.args[0]) except Exception as exp: raise HTTPClientException('Request error to %s: %s' % (uri, exp))
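An illustrative call of the post helper; the client construction and the 'do_action' path are assumptions, not part of the source, while the timeout exception comes from the code above:

client = SomeDaemonClient()          # hypothetical instance of the class defining post()
try:
    raw = client.post('do_action', {'action': 'reload'}, wait=True)
except HTTPClientTimeoutException as exc:
    logger.error("daemon did not answer in time: %s", exc)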
def get_global_count(self): """ Return global count (used for naming of .json and .png files) Returns ------- int Global count """ # Check current number of json files in results directory and dump current json in new file path_to_json = self.results_folder_name + '/' json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')] Wrapper.global_count = len(json_files) + 1 return Wrapper.global_count
Return global count (used for naming of .json and .png files) Returns ------- int Global count
Below is the the instruction that describes the task: ### Input: Return global count (used for naming of .json and .png files) Returns ------- int Global count ### Response: def get_global_count(self): """ Return global count (used for naming of .json and .png files) Returns ------- int Global count """ # Check current number of json files in results directory and dump current json in new file path_to_json = self.results_folder_name + '/' json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')] Wrapper.global_count = len(json_files) + 1 return Wrapper.global_count
def _get_page_from_path(self, path): """ Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page. """ from feincms.module.page.models import Page try: return Page.objects.best_match_for_path(path) except Page.DoesNotExist: return None
Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page.
Below is the the instruction that describes the task: ### Input: Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page. ### Response: def _get_page_from_path(self, path): """ Fetches the FeinCMS Page object that the path points to. Override this to deal with different types of object from Page. """ from feincms.module.page.models import Page try: return Page.objects.best_match_for_path(path) except Page.DoesNotExist: return None
def restart_program(self): """Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None """ restart_cmd = self.commands.find_by_name('restart-alignak') if not restart_cmd: logger.error("Cannot restart Alignak : missing command named" " 'restart-alignak'. Please add one") return restart_cmd_line = restart_cmd.command_line logger.warning("RESTART command : %s", restart_cmd_line) # Ok get an event handler command that will run in 15min max e_handler = EventHandler({'command': restart_cmd_line, 'timeout': 900}) # Ok now run it e_handler.execute() # And wait for the command to finish while e_handler.status not in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]: e_handler.check_finished(64000) log_level = 'info' if e_handler.status == ACT_STATUS_TIMEOUT or e_handler.exit_status != 0: logger.error("Cannot restart Alignak : the 'restart-alignak' command failed with" " the error code '%d' and the text '%s'.", e_handler.exit_status, e_handler.output) log_level = 'error' # Ok here the command succeed, we can now wait our death self.send_an_element(make_monitoring_log(log_level, "RESTART: %s" % (e_handler.output)))
Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None
Below is the the instruction that describes the task: ### Input: Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None ### Response: def restart_program(self): """Restart Alignak Format of the line that triggers function call:: RESTART_PROGRAM :return: None """ restart_cmd = self.commands.find_by_name('restart-alignak') if not restart_cmd: logger.error("Cannot restart Alignak : missing command named" " 'restart-alignak'. Please add one") return restart_cmd_line = restart_cmd.command_line logger.warning("RESTART command : %s", restart_cmd_line) # Ok get an event handler command that will run in 15min max e_handler = EventHandler({'command': restart_cmd_line, 'timeout': 900}) # Ok now run it e_handler.execute() # And wait for the command to finish while e_handler.status not in [ACT_STATUS_DONE, ACT_STATUS_TIMEOUT]: e_handler.check_finished(64000) log_level = 'info' if e_handler.status == ACT_STATUS_TIMEOUT or e_handler.exit_status != 0: logger.error("Cannot restart Alignak : the 'restart-alignak' command failed with" " the error code '%d' and the text '%s'.", e_handler.exit_status, e_handler.output) log_level = 'error' # Ok here the command succeed, we can now wait our death self.send_an_element(make_monitoring_log(log_level, "RESTART: %s" % (e_handler.output)))
def wrap(item, args=None, krgs=None, **kwargs): """Wraps the given item content between horizontal lines. Item can be a string or a function. **Examples**: :: qprompt.wrap("Hi, this will be wrapped.") # String item. qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item. """ with Wrap(**kwargs): if callable(item): args = args or [] krgs = krgs or {} item(*args, **krgs) else: echo(item)
Wraps the given item content between horizontal lines. Item can be a string or a function. **Examples**: :: qprompt.wrap("Hi, this will be wrapped.") # String item. qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
Below is the the instruction that describes the task: ### Input: Wraps the given item content between horizontal lines. Item can be a string or a function. **Examples**: :: qprompt.wrap("Hi, this will be wrapped.") # String item. qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item. ### Response: def wrap(item, args=None, krgs=None, **kwargs): """Wraps the given item content between horizontal lines. Item can be a string or a function. **Examples**: :: qprompt.wrap("Hi, this will be wrapped.") # String item. qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item. """ with Wrap(**kwargs): if callable(item): args = args or [] krgs = krgs or {} item(*args, **krgs) else: echo(item)
def static2dplot_timeaveraged(var, time): """ If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram plot(s) for which it is set will have another window pop up, where the displayed y and z values are averaged by the number of seconds between the specified time range. """ # Grab names of data loaded in as tplot variables. names = list(pytplot.data_quants.keys()) # Get data we'll actually work with here. valid_variables = tplot_utilities.get_data(names) # Don't plot anything unless we have spectrograms with which to work. if valid_variables: # Get z label labels = tplot_utilities.get_labels_axis_types(names) # Put together data in easy-to-access format for plots. data = {} for name in valid_variables: bins = tplot_utilities.get_bins(name) time_values, z_values = tplot_utilities.get_z_t_values(name) data[name] = [bins, z_values, time_values] # Set up the 2D static plot pytplot.static_tavg_window = pg.GraphicsWindow() pytplot.static_tavg_window.resize(1000, 600) pytplot.static_tavg_window.setWindowTitle('Time-Averaged Values Static Window') plot = pytplot.static_tavg_window.addPlot(title='2D Static Plot for Time-Averaged Values', row=0, col=0) # Make it so that whenever this first starts up, you just have an empty plot plot_data = plot.plot([], []) if var in valid_variables: # Get min/max values of data's time range (in both datetime and seconds since epoch) t_min = np.nanmin(time_values) t_min_str = tplot_utilities.int_to_str(np.nanmin(time_values)) t_min_conv_back = tplot_utilities.str_to_int(t_min_str) t_max = np.nanmax(time_values) t_max_str = tplot_utilities.int_to_str(np.nanmax(time_values)) t_max_conv_back = tplot_utilities.str_to_int(t_max_str) # Convert user input to seconds since epoch user_time = [tplot_utilities.str_to_int(i) for i in time] # Covering situation where user entered a time not in the dataset! # As long as they used a time in the dataset, this will not trigger. for t, datetime in enumerate(user_time): if datetime not in range(t_min_conv_back, t_max_conv_back+1): while True: try: if t == 0: time_bound = 'left bound' else: time_bound = 'right bound' user_time[t] = tplot_utilities.str_to_int(input( 'Chosen {} time [{}] not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S).'.format( time_bound, tplot_utilities.int_to_str(datetime), t_min_str, t_max_str))) except ValueError: continue else: if user_time[t] not in range(int(t_min), int(t_max)): continue else: break # Get index of the time closest to the user's time choice time_array = np.array(data[var][2]) array = np.asarray(time_array) idx = [(np.abs(array - i)).argmin() for i in user_time] # Average values based on the chosen time range's indices time_diff = abs(idx[0]-idx[1]) # Make sure to account for edge problem if idx[1] != -1: y_values_slice = data[name][1][idx[0]:idx[1]+1] else: y_values_slice = data[name][1][idx[0]:] y_values_avgd = np.nansum(y_values_slice, axis=0)/np.float(time_diff) # If user indicated they wanted the interactive plot's axes to be logged, log 'em. # But first make sure that values in x and y are loggable! 
x_axis = False y_axis = False # Checking x axis if np.nanmin(data[name][0][:]) < 0: print('Negative data is incompatible with log plotting.') elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log': x_axis = True # Checking y axis if np.nanmin(list(data[name][1][idx[0]])) < 0 or np.nanmin(list(data[name][1][idx[1]])) < 0: print('Negative data is incompatible with log plotting') elif np.nanmin(list(data[name][1][idx[0]])) >= 0 and np.nanmin(list(data[name][1][idx[1]])) >= 0 and \ labels[name][3] == 'log': y_axis = True # Set plot labels plot.setLabel('bottom', '{}'.format(labels[name][0])) plot.setLabel('left', '{}'.format(labels[name][1])) plot.setLogMode(x=x_axis, y=y_axis) # Update x and y range if user modified it tplot_utilities.set_x_range(name, x_axis, plot) tplot_utilities.set_y_range(name, y_axis, plot) # Plot data based on time we're hovering over plot_data.setData(data[var][0][:], y_values_avgd)
If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram plot(s) for which it is set will have another window pop up, where the displayed y and z values are averaged by the number of seconds between the specified time range.
Below is the the instruction that describes the task: ### Input: If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram plot(s) for which it is set will have another window pop up, where the displayed y and z values are averaged by the number of seconds between the specified time range. ### Response: def static2dplot_timeaveraged(var, time): """ If the static_taverage option is set in tplot, and is supplied with a time range, then the spectrogram plot(s) for which it is set will have another window pop up, where the displayed y and z values are averaged by the number of seconds between the specified time range. """ # Grab names of data loaded in as tplot variables. names = list(pytplot.data_quants.keys()) # Get data we'll actually work with here. valid_variables = tplot_utilities.get_data(names) # Don't plot anything unless we have spectrograms with which to work. if valid_variables: # Get z label labels = tplot_utilities.get_labels_axis_types(names) # Put together data in easy-to-access format for plots. data = {} for name in valid_variables: bins = tplot_utilities.get_bins(name) time_values, z_values = tplot_utilities.get_z_t_values(name) data[name] = [bins, z_values, time_values] # Set up the 2D static plot pytplot.static_tavg_window = pg.GraphicsWindow() pytplot.static_tavg_window.resize(1000, 600) pytplot.static_tavg_window.setWindowTitle('Time-Averaged Values Static Window') plot = pytplot.static_tavg_window.addPlot(title='2D Static Plot for Time-Averaged Values', row=0, col=0) # Make it so that whenever this first starts up, you just have an empty plot plot_data = plot.plot([], []) if var in valid_variables: # Get min/max values of data's time range (in both datetime and seconds since epoch) t_min = np.nanmin(time_values) t_min_str = tplot_utilities.int_to_str(np.nanmin(time_values)) t_min_conv_back = tplot_utilities.str_to_int(t_min_str) t_max = np.nanmax(time_values) t_max_str = tplot_utilities.int_to_str(np.nanmax(time_values)) t_max_conv_back = tplot_utilities.str_to_int(t_max_str) # Convert user input to seconds since epoch user_time = [tplot_utilities.str_to_int(i) for i in time] # Covering situation where user entered a time not in the dataset! # As long as they used a time in the dataset, this will not trigger. for t, datetime in enumerate(user_time): if datetime not in range(t_min_conv_back, t_max_conv_back+1): while True: try: if t == 0: time_bound = 'left bound' else: time_bound = 'right bound' user_time[t] = tplot_utilities.str_to_int(input( 'Chosen {} time [{}] not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S).'.format( time_bound, tplot_utilities.int_to_str(datetime), t_min_str, t_max_str))) except ValueError: continue else: if user_time[t] not in range(int(t_min), int(t_max)): continue else: break # Get index of the time closest to the user's time choice time_array = np.array(data[var][2]) array = np.asarray(time_array) idx = [(np.abs(array - i)).argmin() for i in user_time] # Average values based on the chosen time range's indices time_diff = abs(idx[0]-idx[1]) # Make sure to account for edge problem if idx[1] != -1: y_values_slice = data[name][1][idx[0]:idx[1]+1] else: y_values_slice = data[name][1][idx[0]:] y_values_avgd = np.nansum(y_values_slice, axis=0)/np.float(time_diff) # If user indicated they wanted the interactive plot's axes to be logged, log 'em. # But first make sure that values in x and y are loggable! 
x_axis = False y_axis = False # Checking x axis if np.nanmin(data[name][0][:]) < 0: print('Negative data is incompatible with log plotting.') elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log': x_axis = True # Checking y axis if np.nanmin(list(data[name][1][idx[0]])) < 0 or np.nanmin(list(data[name][1][idx[1]])) < 0: print('Negative data is incompatible with log plotting') elif np.nanmin(list(data[name][1][idx[0]])) >= 0 and np.nanmin(list(data[name][1][idx[1]])) >= 0 and \ labels[name][3] == 'log': y_axis = True # Set plot labels plot.setLabel('bottom', '{}'.format(labels[name][0])) plot.setLabel('left', '{}'.format(labels[name][1])) plot.setLogMode(x=x_axis, y=y_axis) # Update x and y range if user modified it tplot_utilities.set_x_range(name, x_axis, plot) tplot_utilities.set_y_range(name, y_axis, plot) # Plot data based on time we're hovering over plot_data.setData(data[var][0][:], y_values_avgd)
def callable(self) -> Optional[str]: """Show the name of the current callable in the trace""" if self.current_trace_frame_index != -1: return self._get_callable_from_trace_tuple( self.trace_tuples[self.current_trace_frame_index] )[0] return None
Show the name of the current callable in the trace
Below is the the instruction that describes the task: ### Input: Show the name of the current callable in the trace ### Response: def callable(self) -> Optional[str]: """Show the name of the current callable in the trace""" if self.current_trace_frame_index != -1: return self._get_callable_from_trace_tuple( self.trace_tuples[self.current_trace_frame_index] )[0] return None
def rmse(self): """Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. """ if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) # if df is empty or contains non-numeric, scikit learn will raise error. mse = mean_squared_error(df['target'], df['predicted']) return math.sqrt(mse) elif self._bigquery: query = bq.Query(""" SELECT SQRT(SUM(ABS(predicted-target) * ABS(predicted-target)) / COUNT(*)) as rmse FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['rmse'][0]
Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number.
Below is the the instruction that describes the task: ### Input: Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. ### Response: def rmse(self): """Get RMSE for regression model evaluation results. Returns: the RMSE float number. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column, or if target or predicted is not number. """ if self._input_csv_files: df = self._get_data_from_csv_files() if 'target' not in df or 'predicted' not in df: raise ValueError('Cannot find "target" or "predicted" column') df = df[['target', 'predicted']].apply(pd.to_numeric) # if df is empty or contains non-numeric, scikit learn will raise error. mse = mean_squared_error(df['target'], df['predicted']) return math.sqrt(mse) elif self._bigquery: query = bq.Query(""" SELECT SQRT(SUM(ABS(predicted-target) * ABS(predicted-target)) / COUNT(*)) as rmse FROM %s""" % self._bigquery) df = self._get_data_from_bigquery([query]) if df.empty: return None return df['rmse'][0]
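As a worked check of the metric itself: with targets [1, 2, 3] and predictions [1, 2, 5], MSE = (0 + 0 + 4) / 3 and RMSE = sqrt(4/3) ≈ 1.155. A standalone sketch mirroring the CSV branch above:

import math
import pandas as pd
from sklearn.metrics import mean_squared_error

df = pd.DataFrame({'target': [1, 2, 3], 'predicted': [1, 2, 5]})
print(round(math.sqrt(mean_squared_error(df['target'], df['predicted'])), 3))  # 1.155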
def clear_db(): """Clear the entire db.""" cursor = '0' while cursor != 0: cursor, keys = DB.scan(cursor, match='*', count=5000) if keys: DB.delete(*keys)
Clear the entire db.
Below is the the instruction that describes the task: ### Input: Clear the entire db. ### Response: def clear_db(): """Clear the entire db.""" cursor = '0' while cursor != 0: cursor, keys = DB.scan(cursor, match='*', count=5000) if keys: DB.delete(*keys)
def get_coord_line_number(self,coord): """return the one-indexed line number given the coordinates""" if coord[0] in self._coords: if coord[1] in self._coords[coord[0]]: return self._coords[coord[0]][coord[1]] return None
return the one-indexed line number given the coordinates
Below is the the instruction that describes the task: ### Input: return the one-indexed line number given the coordinates ### Response: def get_coord_line_number(self,coord): """return the one-indexed line number given the coordinates""" if coord[0] in self._coords: if coord[1] in self._coords[coord[0]]: return self._coords[coord[0]][coord[1]] return None
def update_cache(from_currency, to_currency):
    """
    Update the from_currency/to_currency pair in the cache with fresh API data
    if the last update for that pair is over 30 minutes old
    """
    if check_update(from_currency, to_currency) is True:
        ccache[from_currency][to_currency]['value'] = convert_using_api(from_currency, to_currency)
        ccache[from_currency][to_currency]['last_update'] = time.time()
        cache.write(ccache)
Update the from_currency/to_currency pair in the cache with fresh API data if the last update for that pair is over 30 minutes old
Below is the the instruction that describes the task:
### Input:
Update the from_currency/to_currency pair in the cache with fresh API data if the last update for that pair is over 30 minutes old
### Response:
def update_cache(from_currency, to_currency):
    """
    Update the from_currency/to_currency pair in the cache with fresh API data
    if the last update for that pair is over 30 minutes old
    """
    if check_update(from_currency, to_currency) is True:
        ccache[from_currency][to_currency]['value'] = convert_using_api(from_currency, to_currency)
        ccache[from_currency][to_currency]['last_update'] = time.time()
        cache.write(ccache)
def create_object(self, name, experiment_id, model_id, argument_defs, arguments=None, properties=None): """Create a model run object with the given list of arguments. The initial state of the object is RUNNING. Raises ValueError if given arguments are invalid. Parameters ---------- name : string User-provided name for the model run experiment_id : string Unique identifier of associated experiment object model_id : string Unique model identifier argument_defs : list(attribute.AttributeDefinition) Definition of valid arguments for the given model arguments : list(dict('name':...,'value:...')), optional List of attribute instances properties : Dictionary, optional Set of model run properties. Returns ------- PredictionHandle Object handle for created model run """ # Create a new object identifier. identifier = str(uuid.uuid4()).replace('-','') # Directory for successful model run resource files. Directories are # simply named by object identifier directory = os.path.join(self.directory, identifier) # Create the directory if it doesn't exists if not os.access(directory, os.F_OK): os.makedirs(directory) # By default all model runs are in IDLE state at creation state = ModelRunIdle() # Create the initial set of properties. run_properties = { datastore.PROPERTY_NAME: name, datastore.PROPERTY_STATE: str(state), datastore.PROPERTY_MODEL: model_id } if not properties is None: for prop in properties: if not prop in run_properties: run_properties[prop] = properties[prop] # If argument list is not given then the initial set of arguments is # empty. Here we do not validate the given arguments. Definitions of # valid argument sets are maintained in the model registry and are not # accessible by the model run manager at this point. run_arguments = {} if not arguments is None: # Convert arguments to dictionary of Atrribute instances. Will # raise an exception if values are of invalid type. run_arguments = attribute.to_dict(arguments, argument_defs) # Create the image group object and store it in the database before # returning it. obj = ModelRunHandle( identifier, run_properties, directory, state, experiment_id, model_id, run_arguments ) self.insert_object(obj) return obj
Create a model run object with the given list of arguments. The initial state of the object is RUNNING. Raises ValueError if given arguments are invalid. Parameters ---------- name : string User-provided name for the model run experiment_id : string Unique identifier of associated experiment object model_id : string Unique model identifier argument_defs : list(attribute.AttributeDefinition) Definition of valid arguments for the given model arguments : list(dict('name':...,'value:...')), optional List of attribute instances properties : Dictionary, optional Set of model run properties. Returns ------- PredictionHandle Object handle for created model run
Below is the the instruction that describes the task: ### Input: Create a model run object with the given list of arguments. The initial state of the object is RUNNING. Raises ValueError if given arguments are invalid. Parameters ---------- name : string User-provided name for the model run experiment_id : string Unique identifier of associated experiment object model_id : string Unique model identifier argument_defs : list(attribute.AttributeDefinition) Definition of valid arguments for the given model arguments : list(dict('name':...,'value:...')), optional List of attribute instances properties : Dictionary, optional Set of model run properties. Returns ------- PredictionHandle Object handle for created model run ### Response: def create_object(self, name, experiment_id, model_id, argument_defs, arguments=None, properties=None): """Create a model run object with the given list of arguments. The initial state of the object is RUNNING. Raises ValueError if given arguments are invalid. Parameters ---------- name : string User-provided name for the model run experiment_id : string Unique identifier of associated experiment object model_id : string Unique model identifier argument_defs : list(attribute.AttributeDefinition) Definition of valid arguments for the given model arguments : list(dict('name':...,'value:...')), optional List of attribute instances properties : Dictionary, optional Set of model run properties. Returns ------- PredictionHandle Object handle for created model run """ # Create a new object identifier. identifier = str(uuid.uuid4()).replace('-','') # Directory for successful model run resource files. Directories are # simply named by object identifier directory = os.path.join(self.directory, identifier) # Create the directory if it doesn't exists if not os.access(directory, os.F_OK): os.makedirs(directory) # By default all model runs are in IDLE state at creation state = ModelRunIdle() # Create the initial set of properties. run_properties = { datastore.PROPERTY_NAME: name, datastore.PROPERTY_STATE: str(state), datastore.PROPERTY_MODEL: model_id } if not properties is None: for prop in properties: if not prop in run_properties: run_properties[prop] = properties[prop] # If argument list is not given then the initial set of arguments is # empty. Here we do not validate the given arguments. Definitions of # valid argument sets are maintained in the model registry and are not # accessible by the model run manager at this point. run_arguments = {} if not arguments is None: # Convert arguments to dictionary of Atrribute instances. Will # raise an exception if values are of invalid type. run_arguments = attribute.to_dict(arguments, argument_defs) # Create the image group object and store it in the database before # returning it. obj = ModelRunHandle( identifier, run_properties, directory, state, experiment_id, model_id, run_arguments ) self.insert_object(obj) return obj
def get_string_from_data(self, offset, data): """Get an ASCII string from within the data.""" # OC Patch b = None try: b = data[offset] except IndexError: return '' s = '' while ord(b): s += b offset += 1 try: b = data[offset] except IndexError: break return s
Get an ASCII string from within the data.
Below is the the instruction that describes the task: ### Input: Get an ASCII string from within the data. ### Response: def get_string_from_data(self, offset, data): """Get an ASCII string from within the data.""" # OC Patch b = None try: b = data[offset] except IndexError: return '' s = '' while ord(b): s += b offset += 1 try: b = data[offset] except IndexError: break return s
def get_assets_by_genus_type(self, asset_genus_type): """Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``. In plenary mode, the returned list contains all known assets or an error results. Otherwise, the returned list may contain only those assets that are accessible through this session. arg: asset_genus_type (osid.type.Type): an asset genus type return: (osid.repository.AssetList) - the returned ``Asset list`` raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_genus_type # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) result = collection.find( dict({'genusTypeId': str(asset_genus_type)}, **self._view_filter())).sort('_id', DESCENDING) return objects.AssetList(result, runtime=self._runtime, proxy=self._proxy)
Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``. In plenary mode, the returned list contains all known assets or an error results. Otherwise, the returned list may contain only those assets that are accessible through this session. arg: asset_genus_type (osid.type.Type): an asset genus type return: (osid.repository.AssetList) - the returned ``Asset list`` raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``. In plenary mode, the returned list contains all known assets or an error results. Otherwise, the returned list may contain only those assets that are accessible through this session. arg: asset_genus_type (osid.type.Type): an asset genus type return: (osid.repository.AssetList) - the returned ``Asset list`` raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_assets_by_genus_type(self, asset_genus_type): """Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``. In plenary mode, the returned list contains all known assets or an error results. Otherwise, the returned list may contain only those assets that are accessible through this session. arg: asset_genus_type (osid.type.Type): an asset genus type return: (osid.repository.AssetList) - the returned ``Asset list`` raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_genus_type # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) result = collection.find( dict({'genusTypeId': str(asset_genus_type)}, **self._view_filter())).sort('_id', DESCENDING) return objects.AssetList(result, runtime=self._runtime, proxy=self._proxy)
def drop(n, it, constructor=list): """ >>> first(10,drop(10,xrange(sys.maxint),iter)) [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] """ return constructor(itertools.islice(it,n,None))
>>> first(10,drop(10,xrange(sys.maxint),iter)) [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Below is the the instruction that describes the task: ### Input: >>> first(10,drop(10,xrange(sys.maxint),iter)) [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] ### Response: def drop(n, it, constructor=list): """ >>> first(10,drop(10,xrange(sys.maxint),iter)) [10, 11, 12, 13, 14, 15, 16, 17, 18, 19] """ return constructor(itertools.islice(it,n,None))
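The doctest above is Python 2 specific (xrange, sys.maxint); an equivalent Python 3 call, with a stand-in for the sibling first() helper, would be:

import itertools
def first(n, it, constructor=list):   # stand-in; the real helper presumably lives alongside drop()
    return constructor(itertools.islice(it, n))
print(first(10, drop(10, range(10**9), iter)))
# [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]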
def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None, follow=None, include=None, only=None, method='asdict', **kwargs): """Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. :param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. 
:returns: dict """ follow = arg_to_dict(follow) info = inspect(model) columns = [c.key for c in info.mapper.column_attrs] synonyms = [c.key for c in info.mapper.synonyms] if only: attrs = only else: exclude = exclude or [] exclude += getattr(model, 'dictalchemy_exclude', constants.default_exclude) or [] if exclude_underscore is None: exclude_underscore = getattr(model, 'dictalchemy_exclude_underscore', constants.default_exclude_underscore) if exclude_underscore: # Exclude all properties starting with underscore exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_'] if exclude_pk is True: exclude += [c.key for c in info.mapper.primary_key] include = (include or []) + (getattr(model, 'dictalchemy_asdict_include', getattr(model, 'dictalchemy_include', None)) or []) attrs = [k for k in columns + synonyms + include if k not in exclude] data = dict([(k, getattr(model, k)) for k in attrs]) for (rel_key, orig_args) in follow.iteritems(): try: rel = getattr(model, rel_key) except AttributeError: raise errors.MissingRelationError(rel_key) args = copy.deepcopy(orig_args) method = args.pop('method', method) args['method'] = method args.update(copy.copy(kwargs)) if hasattr(rel, method): rel_data = getattr(rel, method)(**args) elif isinstance(rel, (list, _AssociationList)): rel_data = [] for child in rel: if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: try: rel_data.append(dict(child)) # TypeError is for non-dictable children except TypeError: rel_data.append(copy.copy(child)) elif isinstance(rel, dict): rel_data = {} for (child_key, child) in rel.iteritems(): if hasattr(child, method): rel_data[child_key] = getattr(child, method)(**args) else: try: rel_data[child_key] = dict(child) except ValueError: rel_data[child_key] = copy.copy(child) elif isinstance(rel, (AppenderMixin, Query)): rel_data = [] for child in rel.all(): if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: rel_data.append(dict(child)) elif rel is None: rel_data = None else: raise errors.UnsupportedRelationError(rel_key) ins_key = args.pop('parent', None) if ins_key is None: data[rel_key] = rel_data else: if ins_key not in data: data[ins_key] = {} data[ins_key][rel_key] = rel_data return data
Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. :param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. :returns: dict
Below is the the instruction that describes the task: ### Input: Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. :param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. :returns: dict ### Response: def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None, follow=None, include=None, only=None, method='asdict', **kwargs): """Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. 
:param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. :returns: dict """ follow = arg_to_dict(follow) info = inspect(model) columns = [c.key for c in info.mapper.column_attrs] synonyms = [c.key for c in info.mapper.synonyms] if only: attrs = only else: exclude = exclude or [] exclude += getattr(model, 'dictalchemy_exclude', constants.default_exclude) or [] if exclude_underscore is None: exclude_underscore = getattr(model, 'dictalchemy_exclude_underscore', constants.default_exclude_underscore) if exclude_underscore: # Exclude all properties starting with underscore exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_'] if exclude_pk is True: exclude += [c.key for c in info.mapper.primary_key] include = (include or []) + (getattr(model, 'dictalchemy_asdict_include', getattr(model, 'dictalchemy_include', None)) or []) attrs = [k for k in columns + synonyms + include if k not in exclude] data = dict([(k, getattr(model, k)) for k in attrs]) for (rel_key, orig_args) in follow.iteritems(): try: rel = getattr(model, rel_key) except AttributeError: raise errors.MissingRelationError(rel_key) args = copy.deepcopy(orig_args) method = args.pop('method', method) args['method'] = method args.update(copy.copy(kwargs)) if hasattr(rel, method): rel_data = getattr(rel, method)(**args) elif isinstance(rel, (list, _AssociationList)): rel_data = [] for child in rel: if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: try: rel_data.append(dict(child)) # TypeError is for non-dictable children except TypeError: rel_data.append(copy.copy(child)) elif isinstance(rel, dict): rel_data = {} for (child_key, child) in rel.iteritems(): if hasattr(child, method): rel_data[child_key] = getattr(child, method)(**args) else: try: rel_data[child_key] = dict(child) except ValueError: rel_data[child_key] = copy.copy(child) elif isinstance(rel, (AppenderMixin, Query)): rel_data = [] for child in rel.all(): if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: rel_data.append(dict(child)) elif rel is None: rel_data = None else: raise errors.UnsupportedRelationError(rel_key) ins_key = args.pop('parent', None) if ins_key is None: data[rel_key] = rel_data else: if ins_key not in data: data[ins_key] = {} data[ins_key][rel_key] = rel_data return data
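A minimal, self-contained sketch of how the asdict call above is typically reached. It assumes SQLAlchemy 1.x and dictalchemy are installed; the User model, engine URL and values are made up purely for illustration:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from dictalchemy import make_class_dictable

Base = declarative_base()
make_class_dictable(Base)          # adds asdict()/fromdict() to all models

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(name='alice'))
session.commit()

user = session.query(User).first()
print(user.asdict())                 # {'id': 1, 'name': 'alice'}
print(user.asdict(exclude_pk=True))  # {'name': 'alice'}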
def map_borders(wls):
    """Compute borders of pixels for interpolation.

    The border of the pixel is assumed to be midway of the wls
    """
    midpt_wl = 0.5 * (wls[1:] + wls[:-1])
    all_borders = np.zeros((wls.shape[0] + 1,))
    all_borders[1:-1] = midpt_wl
    all_borders[0] = 2 * wls[0] - midpt_wl[0]
    all_borders[-1] = 2 * wls[-1] - midpt_wl[-1]
    return all_borders
Compute borders of pixels for interpolation. The border of the pixel is assumed to be midway of the wls
Below is the the instruction that describes the task: ### Input: Compute borders of pixels for interpolation. The border of the pixel is assumed to be midway of the wls ### Response: def map_borders(wls): """Compute borders of pixels for interpolation. The border of the pixel is assumed to be midway of the wls """ midpt_wl = 0.5 * (wls[1:] + wls[:-1]) all_borders = np.zeros((wls.shape[0] + 1,)) all_borders[1:-1] = midpt_wl all_borders[0] = 2 * wls[0] - midpt_wl[0] all_borders[-1] = 2 * wls[-1] - midpt_wl[-1] return all_borders
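The same border arithmetic on a toy wavelength grid, to make the midpoint construction concrete (plain NumPy, arbitrary values):

import numpy as np

wls = np.array([1.0, 2.0, 4.0, 7.0])
midpt = 0.5 * (wls[1:] + wls[:-1])       # [1.5, 3.0, 5.5]
borders = np.zeros(wls.shape[0] + 1)
borders[1:-1] = midpt
borders[0] = 2 * wls[0] - midpt[0]       # 0.5
borders[-1] = 2 * wls[-1] - midpt[-1]    # 8.5
print(borders)                           # [0.5 1.5 3.  5.5 8.5]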
def disable_contactgroup_host_notifications(self, contactgroup):
    """Disable host notifications for a contactgroup

    Format of the line that triggers function call::

    DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name>

    :param contactgroup: contactgroup to disable
    :type contactgroup: alignak.objects.contactgroup.Contactgroup
    :return: None
    """
    for contact_id in contactgroup.get_contacts():
        self.disable_contact_host_notifications(self.daemon.contacts[contact_id])
Disable host notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None
Below is the the instruction that describes the task: ### Input: Disable host notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None ### Response: def disable_contactgroup_host_notifications(self, contactgroup): """Disable host notifications for a contactgroup Format of the line that triggers function call:: DISABLE_CONTACTGROUP_HOST_NOTIFICATIONS;<contactgroup_name> :param contactgroup: contactgroup to disable :type contactgroup: alignak.objects.contactgroup.Contactgroup :return: None """ for contact_id in contactgroup.get_contacts(): self.disable_contact_host_notifications(self.daemon.contacts[contact_id])
def get_movement_delta(self):
    """Get the amount the camera has moved since get_movement_delta was last called."""
    pos = self.pan - self.previous_pos
    self.previous_pos = Vector2(self.pan.X, self.pan.Y)
    return pos
Get the amount the camera has moved since get_movement_delta was last called.
Below is the the instruction that describes the task: ### Input: Get the amount the camera has moved since get_movement_delta was last called. ### Response: def get_movement_delta(self): """Get the amount the camera has moved since get_movement_delta was last called.""" pos = self.pan - self.previous_pos self.previous_pos = Vector2(self.pan.X, self.pan.Y) return pos
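A standalone sketch of the same "delta since last call" pattern; the Camera class and list-based vectors here are stand-ins for illustration, not the library's Vector2:

class Camera:
    def __init__(self):
        self.pan = [0.0, 0.0]
        self.previous_pos = [0.0, 0.0]

    def get_movement_delta(self):
        delta = [self.pan[0] - self.previous_pos[0],
                 self.pan[1] - self.previous_pos[1]]
        self.previous_pos = list(self.pan)   # remember where we were
        return delta

cam = Camera()
cam.pan = [3.0, 1.0]
print(cam.get_movement_delta())   # [3.0, 1.0]
print(cam.get_movement_delta())   # [0.0, 0.0] -- no movement since last call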
def create_all_parent_directories(ase, dirs_created, timeout=None):
    # type: (blobxfer.models.azure.StorageEntity, dict, int) -> None
    """Create all parent directories for a file
    :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
    :param dict dirs_created: directories already created map
    :param int timeout: timeout
    """
    dirs = pathlib.Path(ase.name).parts
    if len(dirs) <= 1:
        return
    # remove last part (which is the file)
    dirs = dirs[:-1]
    dk = ase.client.account_name + ':' + ase.container
    for i in range(0, len(dirs)):
        dir = str(pathlib.Path(*(dirs[0:i + 1])))
        if dk not in dirs_created or dir not in dirs_created[dk]:
            ase.client.create_directory(
                share_name=ase.container,
                directory_name=dir,
                fail_on_exist=False,
                timeout=timeout)
            if dk not in dirs_created:
                dirs_created[dk] = set()
            dirs_created[dk].add(dir)
Create all parent directories for a file :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity :param dict dirs_created: directories already created map :param int timeout: timeout
Below is the the instruction that describes the task: ### Input: Create all parent directories for a file :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity :param dict dirs_created: directories already created map :param int timeout: timeout ### Response: def create_all_parent_directories(ase, dirs_created, timeout=None): # type: (blobxfer.models.azure.StorageEntity, dict, int) -> None """Create all parent directories for a file :param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity :param dict dirs_created: directories already created map :param int timeout: timeout """ dirs = pathlib.Path(ase.name).parts if len(dirs) <= 1: return # remove last part (which is the file) dirs = dirs[:-1] dk = ase.client.account_name + ':' + ase.container for i in range(0, len(dirs)): dir = str(pathlib.Path(*(dirs[0:i + 1]))) if dk not in dirs_created or dir not in dirs_created[dk]: ase.client.create_directory( share_name=ase.container, directory_name=dir, fail_on_exist=False, timeout=timeout) if dk not in dirs_created: dirs_created[dk] = set() dirs_created[dk].add(dir)
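Only the path-splitting part of the helper above, to show which directory names the loop walks through (no Azure client involved; the path is made up):

import pathlib

name = 'container-root/sub/dir/file.bin'
dirs = pathlib.Path(name).parts[:-1]        # drop the file itself
for i in range(len(dirs)):
    print(str(pathlib.Path(*dirs[0:i + 1])))
# container-root
# container-root/sub
# container-root/sub/dir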
def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutates the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not _is_tensor_image(tensor):
        raise TypeError('tensor is not a torch image.')

    if not inplace:
        tensor = tensor.clone()

    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)
    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)
    tensor.sub_(mean[:, None, None]).div_(std[:, None, None])
    return tensor
Normalize a tensor image with mean and standard deviation. .. note:: This transform acts out of place by default, i.e., it does not mutates the input tensor. See :class:`~torchvision.transforms.Normalize` for more details. Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. Returns: Tensor: Normalized Tensor image.
Below is the the instruction that describes the task: ### Input: Normalize a tensor image with mean and standard deviation. .. note:: This transform acts out of place by default, i.e., it does not mutates the input tensor. See :class:`~torchvision.transforms.Normalize` for more details. Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. Returns: Tensor: Normalized Tensor image. ### Response: def normalize(tensor, mean, std, inplace=False): """Normalize a tensor image with mean and standard deviation. .. note:: This transform acts out of place by default, i.e., it does not mutates the input tensor. See :class:`~torchvision.transforms.Normalize` for more details. Args: tensor (Tensor): Tensor image of size (C, H, W) to be normalized. mean (sequence): Sequence of means for each channel. std (sequence): Sequence of standard deviations for each channel. Returns: Tensor: Normalized Tensor image. """ if not _is_tensor_image(tensor): raise TypeError('tensor is not a torch image.') if not inplace: tensor = tensor.clone() mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device) std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device) tensor.sub_(mean[:, None, None]).div_(std[:, None, None]) return tensor
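A typical call through torchvision's functional API (assumes torch and torchvision are installed; the ImageNet mean/std values are just example statistics):

import torch
from torchvision.transforms import functional as F

img = torch.rand(3, 224, 224)   # fake CHW image in [0, 1]
out = F.normalize(img,
                  mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225])
print(out.shape)                # torch.Size([3, 224, 224]); img itself is untouched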
def revert(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Clear all cookies and reload cookies from a saved file.

    Raises LoadError (or IOError) if reversion is not successful; the
    object's state will not be altered if this happens.
    """
    if filename is None:
        if self.filename is not None:
            filename = self.filename
        else:
            raise ValueError(MISSING_FILENAME_TEXT)

    self._cookies_lock.acquire()
    try:
        old_state = copy.deepcopy(self._cookies)
        self._cookies = {}
        try:
            self.load(filename, ignore_discard, ignore_expires)
        except (LoadError, IOError):
            self._cookies = old_state
            raise
    finally:
        self._cookies_lock.release()
Clear all cookies and reload cookies from a saved file. Raises LoadError (or IOError) if reversion is not successful; the object's state will not be altered if this happens.
Below is the the instruction that describes the task: ### Input: Clear all cookies and reload cookies from a saved file. Raises LoadError (or IOError) if reversion is not successful; the object's state will not be altered if this happens. ### Response: def revert(self, filename=None, ignore_discard=False, ignore_expires=False): """Clear all cookies and reload cookies from a saved file. Raises LoadError (or IOError) if reversion is not successful; the object's state will not be altered if this happens. """ if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) self._cookies_lock.acquire() try: old_state = copy.deepcopy(self._cookies) self._cookies = {} try: self.load(filename, ignore_discard, ignore_expires) except (LoadError, IOError): self._cookies = old_state raise finally: self._cookies_lock.release()
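A short sketch of revert() in practice with the standard library's cookie jar, which exposes the same method; the filename is arbitrary:

from http.cookiejar import MozillaCookieJar

jar = MozillaCookieJar('cookies.txt')
jar.save()                 # write the current (empty) state to disk
# ... cookies get added or mutated in memory here ...
jar.revert()               # drop in-memory changes, reload from cookies.txt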
def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist):
    """
    This function generates a 2-dimensional lattice of points using a
    hexagonal lattice.

    Parameters
    -----------
    maxv1 : float
        Largest value in the 1st dimension to cover
    minv1 : float
        Smallest value in the 1st dimension to cover
    maxv2 : float
        Largest value in the 2nd dimension to cover
    minv2 : float
        Smallest value in the 2nd dimension to cover
    mindist : float
        Maximum allowed mismatch between a point in the parameter space and
        the generated bank of points.

    Returns
    --------
    v1s : numpy.array
        Array of positions in the first dimension
    v2s : numpy.array
        Array of positions in the second dimension
    """
    if minv1 > maxv1:
        raise ValueError("Invalid input to function.")
    if minv2 > maxv2:
        raise ValueError("Invalid input to function.")
    # Place first point
    v1s = [minv1]
    v2s = [minv2]
    initPoint = [minv1, minv2]
    # Place first line
    initLine = [initPoint]
    tmpv1 = minv1
    while (tmpv1 < maxv1):
        tmpv1 = tmpv1 + (3 * mindist)**(0.5)
        initLine.append([tmpv1, minv2])
        v1s.append(tmpv1)
        v2s.append(minv2)
    initLine = numpy.array(initLine)
    initLine2 = copy.deepcopy(initLine)
    initLine2[:, 0] += 0.5 * (3 * mindist)**0.5
    initLine2[:, 1] += 1.5 * (mindist)**0.5
    for i in xrange(len(initLine2)):
        v1s.append(initLine2[i, 0])
        v2s.append(initLine2[i, 1])
    tmpv2_1 = initLine[0, 1]
    tmpv2_2 = initLine2[0, 1]
    while tmpv2_1 < maxv2 and tmpv2_2 < maxv2:
        tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5
        tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5
        initLine[:, 1] = tmpv2_1
        initLine2[:, 1] = tmpv2_2
        for i in xrange(len(initLine)):
            v1s.append(initLine[i, 0])
            v2s.append(initLine[i, 1])
        for i in xrange(len(initLine2)):
            v1s.append(initLine2[i, 0])
            v2s.append(initLine2[i, 1])
    v1s = numpy.array(v1s)
    v2s = numpy.array(v2s)
    return v1s, v2s
This function generates a 2-dimensional lattice of points using a hexagonal lattice. Parameters ----------- maxv1 : float Largest value in the 1st dimension to cover minv1 : float Smallest value in the 1st dimension to cover maxv2 : float Largest value in the 2nd dimension to cover minv2 : float Smallest value in the 2nd dimension to cover mindist : float Maximum allowed mismatch between a point in the parameter space and the generated bank of points. Returns -------- v1s : numpy.array Array of positions in the first dimension v2s : numpy.array Array of positions in the second dimension
Below is the the instruction that describes the task: ### Input: This function generates a 2-dimensional lattice of points using a hexagonal lattice. Parameters ----------- maxv1 : float Largest value in the 1st dimension to cover minv1 : float Smallest value in the 1st dimension to cover maxv2 : float Largest value in the 2nd dimension to cover minv2 : float Smallest value in the 2nd dimension to cover mindist : float Maximum allowed mismatch between a point in the parameter space and the generated bank of points. Returns -------- v1s : numpy.array Array of positions in the first dimension v2s : numpy.array Array of positions in the second dimension ### Response: def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist): """ This function generates a 2-dimensional lattice of points using a hexagonal lattice. Parameters ----------- maxv1 : float Largest value in the 1st dimension to cover minv1 : float Smallest value in the 1st dimension to cover maxv2 : float Largest value in the 2nd dimension to cover minv2 : float Smallest value in the 2nd dimension to cover mindist : float Maximum allowed mismatch between a point in the parameter space and the generated bank of points. Returns -------- v1s : numpy.array Array of positions in the first dimension v2s : numpy.array Array of positions in the second dimension """ if minv1 > maxv1: raise ValueError("Invalid input to function.") if minv2 > maxv2: raise ValueError("Invalid input to function.") # Place first point v1s = [minv1] v2s = [minv2] initPoint = [minv1,minv2] # Place first line initLine = [initPoint] tmpv1 = minv1 while (tmpv1 < maxv1): tmpv1 = tmpv1 + (3 * mindist)**(0.5) initLine.append([tmpv1,minv2]) v1s.append(tmpv1) v2s.append(minv2) initLine = numpy.array(initLine) initLine2 = copy.deepcopy(initLine) initLine2[:,0] += 0.5 * (3*mindist)**0.5 initLine2[:,1] += 1.5 * (mindist)**0.5 for i in xrange(len(initLine2)): v1s.append(initLine2[i,0]) v2s.append(initLine2[i,1]) tmpv2_1 = initLine[0,1] tmpv2_2 = initLine2[0,1] while tmpv2_1 < maxv2 and tmpv2_2 < maxv2: tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5 tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5 initLine[:,1] = tmpv2_1 initLine2[:,1] = tmpv2_2 for i in xrange(len(initLine)): v1s.append(initLine[i,0]) v2s.append(initLine[i,1]) for i in xrange(len(initLine2)): v1s.append(initLine2[i,0]) v2s.append(initLine2[i,1]) v1s = numpy.array(v1s) v2s = numpy.array(v2s) return v1s, v2s
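The spacings implied by the construction above, for a given mindist: points along a row are sqrt(3*mindist) apart, alternate rows are shifted by half that in v1 and by 1.5*sqrt(mindist) in v2, and rows repeat every 3*sqrt(mindist) in v2 (plain arithmetic, illustrative value):

import numpy as np

mindist = 0.03
row_spacing = np.sqrt(3 * mindist)    # spacing between points along a row
offset_v1 = 0.5 * row_spacing         # horizontal shift of alternate rows
offset_v2 = 1.5 * np.sqrt(mindist)    # vertical shift of alternate rows
row_period = 3.0 * np.sqrt(mindist)   # v2 distance between repeats of a row
print(row_spacing, offset_v1, offset_v2, row_period)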
def make_mapcube_source(name, Spatial_Filename, spectrum):
    """Construct and return a `fermipy.roi_model.MapCubeSource` object
    """
    data = dict(Spatial_Filename=Spatial_Filename)
    if spectrum is not None:
        data.update(spectrum)

    return roi_model.MapCubeSource(name, data)
Construct and return a `fermipy.roi_model.MapCubeSource` object
Below is the the instruction that describes the task: ### Input: Construct and return a `fermipy.roi_model.MapCubeSource` object ### Response: def make_mapcube_source(name, Spatial_Filename, spectrum): """Construct and return a `fermipy.roi_model.MapCubeSource` object """ data = dict(Spatial_Filename=Spatial_Filename) if spectrum is not None: data.update(spectrum) return roi_model.MapCubeSource(name, data)
def bind(self, func, etype):
    ''' Wraps around container.bind(). '''
    if func not in self._event_cbs:
        wrapped = self._WrapCB(self, func)
        self._event_cbs[func] = wrapped
    else:
        wrapped = self._event_cbs[func]
    self.container.bind(wrapped, etype)
Wraps around container.bind().
Below is the the instruction that describes the task: ### Input: Wraps around container.bind(). ### Response: def bind(self, func, etype): ''' Wraps around container.bind(). ''' if func not in self._event_cbs: wrapped = self._WrapCB(self, func) self._event_cbs[func] = wrapped else: wrapped = self._event_cbs[func] self.container.bind(wrapped, etype)
def load_monitoring_config_file(self, clean=True): # pylint: disable=too-many-branches,too-many-statements, too-many-locals """Load main configuration file (alignak.cfg):: * Read all files given in the -c parameters * Read all .cfg files in cfg_dir * Read all files in cfg_file * Create objects (Arbiter, Module) * Set HTTP links info (ssl etc) * Load its own modules * Execute read_configuration hook (for arbiter modules) * Create all objects (Service, Host, Realms ...) * "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...) * Cut conf into parts and prepare it for sending The clean parameter is useful to load a configuration without removing the properties only used to parse the configuration and create the objects. Some utilities (like alignak-backend-import script) may need to avoid the cleaning ;) :param clean: set True to clean the created items :type clean: bool :return: None """ self.loading_configuration = True _t_configuration = time.time() if self.verify_only: # Force adding a console handler to the Alignak logger set_log_console(logging.INFO if not self.debug else logging.DEBUG) # Force the global logger at INFO level set_log_level(logging.INFO if not self.debug else logging.DEBUG) logger.info("-----") logger.info("Arbiter is in configuration check mode") logger.info("Arbiter log level got increased to a minimum of INFO") logger.info("-----") # Maybe we do not have environment file # if not self.alignak_env: # self.exit_on_error("*** No Alignak environment file. Exiting...", exit_code=2) # else: # logger.info("Environment file: %s", self.env_filename) if self.legacy_cfg_files: logger.info("Loading monitored system configuration from legacy files: %s", self.legacy_cfg_files) else: logger.info("No legacy file(s) configured for monitored system configuration") # Alignak global environment file # ------------------------------- # Here we did not yet read the Alignak configuration file (except for the Arbiter daemon # configuration. # We must get the Alignak macros and global configuration parameters # --------------------- # Manage Alignak macros; this before loading the legacy configuration files # with their own potential macros # --------------------- macros = [] # Get the macros / variables declared in the Alignak environment (alignak.ini) file! if self.alignak_env: # The properties defined in the alignak.cfg file are not yet set! So we set the one # got from the environment logger.info("Getting Alignak macros...") alignak_macros = self.alignak_env.get_alignak_macros() if alignak_macros: # Remove the leading and trailing underscores for key in sorted(alignak_macros.keys()): value = alignak_macros[key] if key[0] == '_' or key[0] == '$': key = key[1:] if key[-1] == '_' or key[-1] == '$': key = key[:-1] # Create an old legacy macro format macros.append('$%s$=%s' % (key.upper(), value)) logger.debug("- Alignak macro '$%s$' = %s", key.upper(), value) # and then the global configuration. # The properties defined in the alignak.cfg file are not yet set! 
So we set the one # got from the appropriate section of the Alignak environment file logger.info("Getting Alignak configuration...") alignak_configuration = self.alignak_env.get_alignak_configuration() if alignak_configuration: for key in sorted(alignak_configuration.keys()): value = alignak_configuration[key] if key.startswith('_'): # Ignore configuration variables prefixed with _ continue if key in self.conf.properties: entry = self.conf.properties[key] setattr(self.conf, key, entry.pythonize(value)) else: setattr(self.conf, key, value) logger.debug("- setting '%s' as %s", key, getattr(self.conf, key)) logger.info("Got Alignak global configuration") self.alignak_name = getattr(self.conf, "alignak_name", self.name) logger.info("Configuration for Alignak: %s", self.alignak_name) if macros: self.conf.load_params(macros) # Here we got the macros and alignak configuration variables from the # alignak.ini configuration! # The self Config object is now initialized with the global Alignak variables. # We can now read and parse the legacy configuration files (if any...) raw_objects = self.conf.read_config_buf( self.conf.read_legacy_cfg_files(self.legacy_cfg_files, self.alignak_env.cfg_files if self.alignak_env else None) ) if self.alignak_name != getattr(self.conf, "alignak_name", self.name): self.alignak_name = getattr(self.conf, "alignak_name", self.name) logger.warning("Alignak name changed from the legacy Cfg files: %s", self.alignak_name) # Maybe conf is already invalid if not self.conf.conf_is_correct: self.conf.show_errors() self.request_stop("*** One or more problems were encountered while " "processing the configuration (first check)...", exit_code=1) if self.legacy_cfg_files: logger.info("I correctly loaded the legacy configuration files") # Hacking some global parameters inherited from Nagios to create # on the fly some Broker modules like for status.dat parameters # or nagios.log one if there are none already available if 'module' not in raw_objects: raw_objects['module'] = [] extra_modules = self.conf.hack_old_nagios_parameters() if extra_modules: logger.info("Some inner modules were configured for Nagios legacy parameters") for _, module in extra_modules: raw_objects['module'].append(module) logger.debug("Extra modules: %s", extra_modules) # Alignak global environment file # ------------------------------- # Here we got the monitored system configuration from the legacy configuration files # We must overload this configuration for the daemons and modules with the configuration # declared in the Alignak environment (alignak.ini) file! 
if self.alignak_env: # Update the daemons legacy configuration if not complete for daemon_type in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']: if daemon_type not in raw_objects: raw_objects[daemon_type] = [] # Get all the Alignak daemons from the configuration logger.info("Getting daemons configuration...") some_daemons = False for daemon_name, daemon_cfg in list(self.alignak_env.get_daemons().items()): logger.info("Got a daemon configuration for %s", daemon_name) if 'type' not in daemon_cfg: self.conf.add_error("Ignoring daemon with an unknown type: %s" % daemon_name) continue some_daemons = True daemon_type = daemon_cfg['type'] daemon_name = daemon_cfg['name'] logger.info("- got a %s named %s, spare: %s", daemon_type, daemon_name, daemon_cfg.get('spare', False)) # If this daemon is found in the legacy configuration, replace this new_cfg_daemons = [] for cfg_daemon in raw_objects[daemon_type]: if cfg_daemon.get('name', 'unset') == daemon_name \ or cfg_daemon.get("%s_name" % daemon_type, 'unset') == [daemon_name]: logger.info(" updating daemon Cfg file configuration") else: new_cfg_daemons.append(cfg_daemon) new_cfg_daemons.append(daemon_cfg) raw_objects[daemon_type] = new_cfg_daemons logger.debug("Checking daemons configuration:") some_legacy_daemons = False for daemon_type in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']: for cfg_daemon in raw_objects[daemon_type]: some_legacy_daemons = True if 'name' not in cfg_daemon: cfg_daemon['name'] = cfg_daemon['%s_name' % daemon_type] cfg_daemon['modules'] = \ self.alignak_env.get_modules(daemon_name=cfg_daemon['name']) for module_daemon_type, module in extra_modules: if module_daemon_type == daemon_type: cfg_daemon['modules'].append(module['name']) logger.info("- added an Alignak inner module '%s' to the %s: %s", module['name'], daemon_type, cfg_daemon['name']) logger.debug("- %s / %s: ", daemon_type, cfg_daemon['name']) logger.debug(" %s", cfg_daemon) if not some_legacy_daemons: logger.debug("- No legacy configured daemons.") else: logger.info("- some dameons are configured in legacy Cfg files. 
" "You should update the configuration with the new Alignak " "configuration file.") if not some_daemons and not some_legacy_daemons: logger.info("- No configured daemons.") # and then get all modules from the configuration logger.info("Getting modules configuration...") if 'module' in raw_objects and raw_objects['module']: # Manage the former parameters module_alias and module_types # - replace with name and type for module_cfg in raw_objects['module']: if 'module_alias' not in module_cfg and 'name' not in module_cfg: self.conf.add_error("Module declared without any 'name' or 'module_alias'") continue else: if 'name' not in module_cfg: module_cfg['name'] = module_cfg['module_alias'] module_cfg.pop('module_alias') if 'module_types' in module_cfg and 'type' not in module_cfg: module_cfg['type'] = module_cfg['module_types'] module_cfg.pop('module_types') logger.debug("Module cfg %s params: %s", module_cfg['name'], module_cfg) for _, module_cfg in list(self.alignak_env.get_modules().items()): logger.info("- got a module %s, type: %s", module_cfg.get('name', 'unset'), module_cfg.get('type', 'untyped')) # If this module is found in the former Cfg files, replace the former configuration for cfg_module in raw_objects['module']: if cfg_module.get('name', 'unset') == [module_cfg['name']]: logger.info(" updating module Cfg file configuration") cfg_module = module_cfg logger.info("Module %s updated parameters: %s", module_cfg['name'], module_cfg) break else: raw_objects['module'].append(module_cfg) logger.debug("Module env %s params: %s", module_cfg['name'], module_cfg) if 'module' in raw_objects and not raw_objects['module']: logger.info("- No configured modules.") # Create objects for our arbiters and modules self.conf.early_create_objects(raw_objects) # Check that an arbiter link exists and create the appropriate relations # If no arbiter exists, create one with the provided data params = {} if self.alignak_env: params = self.alignak_env.get_alignak_configuration() self.conf.early_arbiter_linking(self.name, params) # Search which arbiter I am in the arbiter links list for lnk_arbiter in self.conf.arbiters: logger.debug("I have an arbiter in my configuration: %s", lnk_arbiter.name) if lnk_arbiter.name != self.name: # Arbiter is not me! logger.info("I found another arbiter (%s) in my (%s) configuration", lnk_arbiter.name, self.name) # And this arbiter needs to receive a configuration lnk_arbiter.need_conf = True continue logger.info("I found myself in the configuration: %s", lnk_arbiter.name) if self.link_to_myself is None: # I update only if it does not yet exist (first configuration load)! # I will not change myself because I am simply reloading a configuration ;) self.link_to_myself = lnk_arbiter self.link_to_myself.instance_id = self.name self.link_to_myself.push_flavor = ''.encode('utf-8') # self.link_to_myself.hash = self.conf.hash # Set myself as alive ;) self.link_to_myself.set_alive() # We consider that this arbiter is a master one... self.is_master = not self.link_to_myself.spare if self.is_master: logger.info("I am the master Arbiter.") else: logger.info("I am a spare Arbiter.") # ... and that this arbiter do not need to receive a configuration lnk_arbiter.need_conf = False if not self.link_to_myself: self.conf.show_errors() self.request_stop("Error: I cannot find my own configuration (%s), I bail out. " "To solve this, please change the arbiter name parameter in " "the Alignak configuration file (certainly alignak.ini) " "with the value '%s'." " Thanks." 
% (self.name, socket.gethostname()), exit_code=1) # Whether I am a spare arbiter, I will parse the whole configuration. This may be useful # if the master fails before sending its configuration to me! # An Arbiter which is not a master one will not go further... # todo: is it a good choice?: # 1/ why reading all the configuration files stuff? # 2/ why not loading configuration data from the modules? # -> Indeed, here, only the main configuration has been fetched by the arbiter. # Perharps, loading only the alignak.ini would be enough for a spare arbiter. # And it will make it simpler to configure... if not self.is_master: logger.info("I am not the master arbiter, I stop parsing the configuration") self.loading_configuration = False return # We load our own modules self.do_load_modules(self.link_to_myself.modules) # Call modules that manage this read configuration pass _ts = time.time() self.hook_point('read_configuration') statsmgr.timer('hook.read_configuration', time.time() - _ts) # Call modules get_alignak_configuration() to load Alignak configuration parameters # todo: re-enable this feature if it is really needed. It is a bit tricky to manage # configuration from our own configuration file and from an external source :( # (example modules: alignak_backend) # _t0 = time.time() # self.load_modules_alignak_configuration() # statsmgr.timer('core.hook.get_alignak_configuration', time.time() - _t0) # Call modules get_objects() to load new objects our own modules # (example modules: alignak_backend) self.load_modules_configuration_objects(raw_objects) # Create objects for all the configuration self.conf.create_objects(raw_objects) # Maybe configuration is already invalid if not self.conf.conf_is_correct: self.conf.show_errors() self.request_stop("*** One or more problems were encountered while processing " "the configuration (second check)...", exit_code=1) # Manage all post-conf modules self.hook_point('early_configuration') # Here we got all our Alignak configuration and the monitored system configuration # from the legacy configuration files and extra modules. logger.info("Preparing configuration...") # Create Template links self.conf.linkify_templates() # All inheritances self.conf.apply_inheritance() # Explode between types self.conf.explode() # Implicit inheritance for services self.conf.apply_implicit_inheritance() # Fill default values for all the configuration objects self.conf.fill_default_configuration() # Remove templates from config self.conf.remove_templates() # Overrides specific service instances properties self.conf.override_properties() # Linkify objects to each other self.conf.linkify() # applying dependencies self.conf.apply_dependencies() # Raise warning about currently unmanaged parameters if self.verify_only: self.conf.warn_about_unmanaged_parameters() # Explode global configuration parameters into Classes self.conf.explode_global_conf() # set our own timezone and propagate it to other satellites self.conf.propagate_timezone_option() # Look for business rules, and create the dep tree self.conf.create_business_rules() # And link them self.conf.create_business_rules_dependencies() # Set my own parameters from the loaded configuration # Last monitoring events self.recent_events = deque(maxlen=int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT', self.conf.events_log_count))) # Manage all post-conf modules self.hook_point('late_configuration') # Configuration is correct? 
logger.info("Checking configuration...") self.conf.is_correct() # Clean objects of temporary/unnecessary attributes for live work: if clean: logger.info("Cleaning configuration objects...") self.conf.clean() # Dump Alignak macros logger.debug("Alignak global macros:") macro_resolver = MacroResolver() macro_resolver.init(self.conf) for macro_name in sorted(self.conf.macros): macro_value = macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name, [], None, None) logger.debug("- $%s$ = %s", macro_name, macro_value) statsmgr.timer('configuration.loading', time.time() - _t_configuration) # REF: doc/alignak-conf-dispatching.png (2) logger.info("Splitting configuration...") self.conf.cut_into_parts() # Here, the self.conf.parts exist # And the realms have some 'packs' # Check if all the configuration daemons will be available if not self.daemons_start(run_daemons=False): self.conf.show_errors() self.request_stop("*** Alignak will not be able to manage the configured daemons. " "Check and update your configuration!", exit_code=1) # Some properties need to be prepared (somehow "flatten"...) before being sent, # This to prepare the configuration that will be sent to our spare arbiter (if any) self.conf.prepare_for_sending() statsmgr.timer('configuration.spliting', time.time() - _t_configuration) # Here, the self.conf.spare_arbiter_conf exist # Still a last configuration check because some things may have changed when # we cut the configuration into parts (eg. hosts and realms consistency) and # when we prepared the configuration for sending if not self.conf.conf_is_correct: # pragma: no cover, not with unit tests. self.conf.show_errors() self.request_stop("Configuration is incorrect, sorry, I bail out", exit_code=1) logger.info("Things look okay - " "No serious problems were detected during the pre-flight check") # Exit if we are just here for config checking if self.verify_only: logger.info("Arbiter %s checked the configuration", self.name) if self.conf.missing_daemons: logger.warning("Some missing daemons were detected in the parsed configuration. " "Nothing to worry about, but you should define them, " "else Alignak will use its default configuration.") # Display found warnings and errors self.conf.show_errors() self.request_stop() del raw_objects # Display found warnings and errors self.conf.show_errors() # Now I have a configuration! self.have_conf = True self.loading_configuration = False statsmgr.timer('configuration.available', time.time() - _t_configuration)
Load main configuration file (alignak.cfg):: * Read all files given in the -c parameters * Read all .cfg files in cfg_dir * Read all files in cfg_file * Create objects (Arbiter, Module) * Set HTTP links info (ssl etc) * Load its own modules * Execute read_configuration hook (for arbiter modules) * Create all objects (Service, Host, Realms ...) * "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...) * Cut conf into parts and prepare it for sending The clean parameter is useful to load a configuration without removing the properties only used to parse the configuration and create the objects. Some utilities (like alignak-backend-import script) may need to avoid the cleaning ;) :param clean: set True to clean the created items :type clean: bool :return: None
Below is the the instruction that describes the task: ### Input: Load main configuration file (alignak.cfg):: * Read all files given in the -c parameters * Read all .cfg files in cfg_dir * Read all files in cfg_file * Create objects (Arbiter, Module) * Set HTTP links info (ssl etc) * Load its own modules * Execute read_configuration hook (for arbiter modules) * Create all objects (Service, Host, Realms ...) * "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...) * Cut conf into parts and prepare it for sending The clean parameter is useful to load a configuration without removing the properties only used to parse the configuration and create the objects. Some utilities (like alignak-backend-import script) may need to avoid the cleaning ;) :param clean: set True to clean the created items :type clean: bool :return: None ### Response: def load_monitoring_config_file(self, clean=True): # pylint: disable=too-many-branches,too-many-statements, too-many-locals """Load main configuration file (alignak.cfg):: * Read all files given in the -c parameters * Read all .cfg files in cfg_dir * Read all files in cfg_file * Create objects (Arbiter, Module) * Set HTTP links info (ssl etc) * Load its own modules * Execute read_configuration hook (for arbiter modules) * Create all objects (Service, Host, Realms ...) * "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...) * Cut conf into parts and prepare it for sending The clean parameter is useful to load a configuration without removing the properties only used to parse the configuration and create the objects. Some utilities (like alignak-backend-import script) may need to avoid the cleaning ;) :param clean: set True to clean the created items :type clean: bool :return: None """ self.loading_configuration = True _t_configuration = time.time() if self.verify_only: # Force adding a console handler to the Alignak logger set_log_console(logging.INFO if not self.debug else logging.DEBUG) # Force the global logger at INFO level set_log_level(logging.INFO if not self.debug else logging.DEBUG) logger.info("-----") logger.info("Arbiter is in configuration check mode") logger.info("Arbiter log level got increased to a minimum of INFO") logger.info("-----") # Maybe we do not have environment file # if not self.alignak_env: # self.exit_on_error("*** No Alignak environment file. Exiting...", exit_code=2) # else: # logger.info("Environment file: %s", self.env_filename) if self.legacy_cfg_files: logger.info("Loading monitored system configuration from legacy files: %s", self.legacy_cfg_files) else: logger.info("No legacy file(s) configured for monitored system configuration") # Alignak global environment file # ------------------------------- # Here we did not yet read the Alignak configuration file (except for the Arbiter daemon # configuration. # We must get the Alignak macros and global configuration parameters # --------------------- # Manage Alignak macros; this before loading the legacy configuration files # with their own potential macros # --------------------- macros = [] # Get the macros / variables declared in the Alignak environment (alignak.ini) file! if self.alignak_env: # The properties defined in the alignak.cfg file are not yet set! 
So we set the one # got from the environment logger.info("Getting Alignak macros...") alignak_macros = self.alignak_env.get_alignak_macros() if alignak_macros: # Remove the leading and trailing underscores for key in sorted(alignak_macros.keys()): value = alignak_macros[key] if key[0] == '_' or key[0] == '$': key = key[1:] if key[-1] == '_' or key[-1] == '$': key = key[:-1] # Create an old legacy macro format macros.append('$%s$=%s' % (key.upper(), value)) logger.debug("- Alignak macro '$%s$' = %s", key.upper(), value) # and then the global configuration. # The properties defined in the alignak.cfg file are not yet set! So we set the one # got from the appropriate section of the Alignak environment file logger.info("Getting Alignak configuration...") alignak_configuration = self.alignak_env.get_alignak_configuration() if alignak_configuration: for key in sorted(alignak_configuration.keys()): value = alignak_configuration[key] if key.startswith('_'): # Ignore configuration variables prefixed with _ continue if key in self.conf.properties: entry = self.conf.properties[key] setattr(self.conf, key, entry.pythonize(value)) else: setattr(self.conf, key, value) logger.debug("- setting '%s' as %s", key, getattr(self.conf, key)) logger.info("Got Alignak global configuration") self.alignak_name = getattr(self.conf, "alignak_name", self.name) logger.info("Configuration for Alignak: %s", self.alignak_name) if macros: self.conf.load_params(macros) # Here we got the macros and alignak configuration variables from the # alignak.ini configuration! # The self Config object is now initialized with the global Alignak variables. # We can now read and parse the legacy configuration files (if any...) raw_objects = self.conf.read_config_buf( self.conf.read_legacy_cfg_files(self.legacy_cfg_files, self.alignak_env.cfg_files if self.alignak_env else None) ) if self.alignak_name != getattr(self.conf, "alignak_name", self.name): self.alignak_name = getattr(self.conf, "alignak_name", self.name) logger.warning("Alignak name changed from the legacy Cfg files: %s", self.alignak_name) # Maybe conf is already invalid if not self.conf.conf_is_correct: self.conf.show_errors() self.request_stop("*** One or more problems were encountered while " "processing the configuration (first check)...", exit_code=1) if self.legacy_cfg_files: logger.info("I correctly loaded the legacy configuration files") # Hacking some global parameters inherited from Nagios to create # on the fly some Broker modules like for status.dat parameters # or nagios.log one if there are none already available if 'module' not in raw_objects: raw_objects['module'] = [] extra_modules = self.conf.hack_old_nagios_parameters() if extra_modules: logger.info("Some inner modules were configured for Nagios legacy parameters") for _, module in extra_modules: raw_objects['module'].append(module) logger.debug("Extra modules: %s", extra_modules) # Alignak global environment file # ------------------------------- # Here we got the monitored system configuration from the legacy configuration files # We must overload this configuration for the daemons and modules with the configuration # declared in the Alignak environment (alignak.ini) file! 
if self.alignak_env: # Update the daemons legacy configuration if not complete for daemon_type in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']: if daemon_type not in raw_objects: raw_objects[daemon_type] = [] # Get all the Alignak daemons from the configuration logger.info("Getting daemons configuration...") some_daemons = False for daemon_name, daemon_cfg in list(self.alignak_env.get_daemons().items()): logger.info("Got a daemon configuration for %s", daemon_name) if 'type' not in daemon_cfg: self.conf.add_error("Ignoring daemon with an unknown type: %s" % daemon_name) continue some_daemons = True daemon_type = daemon_cfg['type'] daemon_name = daemon_cfg['name'] logger.info("- got a %s named %s, spare: %s", daemon_type, daemon_name, daemon_cfg.get('spare', False)) # If this daemon is found in the legacy configuration, replace this new_cfg_daemons = [] for cfg_daemon in raw_objects[daemon_type]: if cfg_daemon.get('name', 'unset') == daemon_name \ or cfg_daemon.get("%s_name" % daemon_type, 'unset') == [daemon_name]: logger.info(" updating daemon Cfg file configuration") else: new_cfg_daemons.append(cfg_daemon) new_cfg_daemons.append(daemon_cfg) raw_objects[daemon_type] = new_cfg_daemons logger.debug("Checking daemons configuration:") some_legacy_daemons = False for daemon_type in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']: for cfg_daemon in raw_objects[daemon_type]: some_legacy_daemons = True if 'name' not in cfg_daemon: cfg_daemon['name'] = cfg_daemon['%s_name' % daemon_type] cfg_daemon['modules'] = \ self.alignak_env.get_modules(daemon_name=cfg_daemon['name']) for module_daemon_type, module in extra_modules: if module_daemon_type == daemon_type: cfg_daemon['modules'].append(module['name']) logger.info("- added an Alignak inner module '%s' to the %s: %s", module['name'], daemon_type, cfg_daemon['name']) logger.debug("- %s / %s: ", daemon_type, cfg_daemon['name']) logger.debug(" %s", cfg_daemon) if not some_legacy_daemons: logger.debug("- No legacy configured daemons.") else: logger.info("- some dameons are configured in legacy Cfg files. 
" "You should update the configuration with the new Alignak " "configuration file.") if not some_daemons and not some_legacy_daemons: logger.info("- No configured daemons.") # and then get all modules from the configuration logger.info("Getting modules configuration...") if 'module' in raw_objects and raw_objects['module']: # Manage the former parameters module_alias and module_types # - replace with name and type for module_cfg in raw_objects['module']: if 'module_alias' not in module_cfg and 'name' not in module_cfg: self.conf.add_error("Module declared without any 'name' or 'module_alias'") continue else: if 'name' not in module_cfg: module_cfg['name'] = module_cfg['module_alias'] module_cfg.pop('module_alias') if 'module_types' in module_cfg and 'type' not in module_cfg: module_cfg['type'] = module_cfg['module_types'] module_cfg.pop('module_types') logger.debug("Module cfg %s params: %s", module_cfg['name'], module_cfg) for _, module_cfg in list(self.alignak_env.get_modules().items()): logger.info("- got a module %s, type: %s", module_cfg.get('name', 'unset'), module_cfg.get('type', 'untyped')) # If this module is found in the former Cfg files, replace the former configuration for cfg_module in raw_objects['module']: if cfg_module.get('name', 'unset') == [module_cfg['name']]: logger.info(" updating module Cfg file configuration") cfg_module = module_cfg logger.info("Module %s updated parameters: %s", module_cfg['name'], module_cfg) break else: raw_objects['module'].append(module_cfg) logger.debug("Module env %s params: %s", module_cfg['name'], module_cfg) if 'module' in raw_objects and not raw_objects['module']: logger.info("- No configured modules.") # Create objects for our arbiters and modules self.conf.early_create_objects(raw_objects) # Check that an arbiter link exists and create the appropriate relations # If no arbiter exists, create one with the provided data params = {} if self.alignak_env: params = self.alignak_env.get_alignak_configuration() self.conf.early_arbiter_linking(self.name, params) # Search which arbiter I am in the arbiter links list for lnk_arbiter in self.conf.arbiters: logger.debug("I have an arbiter in my configuration: %s", lnk_arbiter.name) if lnk_arbiter.name != self.name: # Arbiter is not me! logger.info("I found another arbiter (%s) in my (%s) configuration", lnk_arbiter.name, self.name) # And this arbiter needs to receive a configuration lnk_arbiter.need_conf = True continue logger.info("I found myself in the configuration: %s", lnk_arbiter.name) if self.link_to_myself is None: # I update only if it does not yet exist (first configuration load)! # I will not change myself because I am simply reloading a configuration ;) self.link_to_myself = lnk_arbiter self.link_to_myself.instance_id = self.name self.link_to_myself.push_flavor = ''.encode('utf-8') # self.link_to_myself.hash = self.conf.hash # Set myself as alive ;) self.link_to_myself.set_alive() # We consider that this arbiter is a master one... self.is_master = not self.link_to_myself.spare if self.is_master: logger.info("I am the master Arbiter.") else: logger.info("I am a spare Arbiter.") # ... and that this arbiter do not need to receive a configuration lnk_arbiter.need_conf = False if not self.link_to_myself: self.conf.show_errors() self.request_stop("Error: I cannot find my own configuration (%s), I bail out. " "To solve this, please change the arbiter name parameter in " "the Alignak configuration file (certainly alignak.ini) " "with the value '%s'." " Thanks." 
% (self.name, socket.gethostname()), exit_code=1) # Whether I am a spare arbiter, I will parse the whole configuration. This may be useful # if the master fails before sending its configuration to me! # An Arbiter which is not a master one will not go further... # todo: is it a good choice?: # 1/ why reading all the configuration files stuff? # 2/ why not loading configuration data from the modules? # -> Indeed, here, only the main configuration has been fetched by the arbiter. # Perharps, loading only the alignak.ini would be enough for a spare arbiter. # And it will make it simpler to configure... if not self.is_master: logger.info("I am not the master arbiter, I stop parsing the configuration") self.loading_configuration = False return # We load our own modules self.do_load_modules(self.link_to_myself.modules) # Call modules that manage this read configuration pass _ts = time.time() self.hook_point('read_configuration') statsmgr.timer('hook.read_configuration', time.time() - _ts) # Call modules get_alignak_configuration() to load Alignak configuration parameters # todo: re-enable this feature if it is really needed. It is a bit tricky to manage # configuration from our own configuration file and from an external source :( # (example modules: alignak_backend) # _t0 = time.time() # self.load_modules_alignak_configuration() # statsmgr.timer('core.hook.get_alignak_configuration', time.time() - _t0) # Call modules get_objects() to load new objects our own modules # (example modules: alignak_backend) self.load_modules_configuration_objects(raw_objects) # Create objects for all the configuration self.conf.create_objects(raw_objects) # Maybe configuration is already invalid if not self.conf.conf_is_correct: self.conf.show_errors() self.request_stop("*** One or more problems were encountered while processing " "the configuration (second check)...", exit_code=1) # Manage all post-conf modules self.hook_point('early_configuration') # Here we got all our Alignak configuration and the monitored system configuration # from the legacy configuration files and extra modules. logger.info("Preparing configuration...") # Create Template links self.conf.linkify_templates() # All inheritances self.conf.apply_inheritance() # Explode between types self.conf.explode() # Implicit inheritance for services self.conf.apply_implicit_inheritance() # Fill default values for all the configuration objects self.conf.fill_default_configuration() # Remove templates from config self.conf.remove_templates() # Overrides specific service instances properties self.conf.override_properties() # Linkify objects to each other self.conf.linkify() # applying dependencies self.conf.apply_dependencies() # Raise warning about currently unmanaged parameters if self.verify_only: self.conf.warn_about_unmanaged_parameters() # Explode global configuration parameters into Classes self.conf.explode_global_conf() # set our own timezone and propagate it to other satellites self.conf.propagate_timezone_option() # Look for business rules, and create the dep tree self.conf.create_business_rules() # And link them self.conf.create_business_rules_dependencies() # Set my own parameters from the loaded configuration # Last monitoring events self.recent_events = deque(maxlen=int(os.environ.get('ALIGNAK_EVENTS_LOG_COUNT', self.conf.events_log_count))) # Manage all post-conf modules self.hook_point('late_configuration') # Configuration is correct? 
logger.info("Checking configuration...") self.conf.is_correct() # Clean objects of temporary/unnecessary attributes for live work: if clean: logger.info("Cleaning configuration objects...") self.conf.clean() # Dump Alignak macros logger.debug("Alignak global macros:") macro_resolver = MacroResolver() macro_resolver.init(self.conf) for macro_name in sorted(self.conf.macros): macro_value = macro_resolver.resolve_simple_macros_in_string("$%s$" % macro_name, [], None, None) logger.debug("- $%s$ = %s", macro_name, macro_value) statsmgr.timer('configuration.loading', time.time() - _t_configuration) # REF: doc/alignak-conf-dispatching.png (2) logger.info("Splitting configuration...") self.conf.cut_into_parts() # Here, the self.conf.parts exist # And the realms have some 'packs' # Check if all the configuration daemons will be available if not self.daemons_start(run_daemons=False): self.conf.show_errors() self.request_stop("*** Alignak will not be able to manage the configured daemons. " "Check and update your configuration!", exit_code=1) # Some properties need to be prepared (somehow "flatten"...) before being sent, # This to prepare the configuration that will be sent to our spare arbiter (if any) self.conf.prepare_for_sending() statsmgr.timer('configuration.spliting', time.time() - _t_configuration) # Here, the self.conf.spare_arbiter_conf exist # Still a last configuration check because some things may have changed when # we cut the configuration into parts (eg. hosts and realms consistency) and # when we prepared the configuration for sending if not self.conf.conf_is_correct: # pragma: no cover, not with unit tests. self.conf.show_errors() self.request_stop("Configuration is incorrect, sorry, I bail out", exit_code=1) logger.info("Things look okay - " "No serious problems were detected during the pre-flight check") # Exit if we are just here for config checking if self.verify_only: logger.info("Arbiter %s checked the configuration", self.name) if self.conf.missing_daemons: logger.warning("Some missing daemons were detected in the parsed configuration. " "Nothing to worry about, but you should define them, " "else Alignak will use its default configuration.") # Display found warnings and errors self.conf.show_errors() self.request_stop() del raw_objects # Display found warnings and errors self.conf.show_errors() # Now I have a configuration! self.have_conf = True self.loading_configuration = False statsmgr.timer('configuration.available', time.time() - _t_configuration)
def set_inhibit(self, inhibit):
    """Set inhibition state"""
    if self._pid and inhibit != self._inhibited:
        os.kill(self._pid, signal.SIGUSR1)
        self._inhibited = inhibit
Set inhibition state
Below is the the instruction that describes the task: ### Input: Set inhibition state ### Response: def set_inhibit(self, inhibit): """Set inhibition state""" if self._pid and inhibit != self._inhibited: os.kill(self._pid, signal.SIGUSR1) self._inhibited = inhibit
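The set_inhibit snippet above only shows the parent side (sending SIGUSR1 and mirroring the flag). As a hedged, POSIX-only sketch of a matching receiver -- every name below is hypothetical and not taken from the original project:

import os
import signal

class InhibitWorker:
    """Toy receiver that flips its own 'inhibited' flag on SIGUSR1."""

    def __init__(self):
        self.inhibited = False
        signal.signal(signal.SIGUSR1, self._toggle)

    def _toggle(self, signum, frame):
        # Counterpart of the bookkeeping the parent does in set_inhibit()
        self.inhibited = not self.inhibited


worker = InhibitWorker()
os.kill(os.getpid(), signal.SIGUSR1)   # a real parent would use the child's PID
print(worker.inhibited)                # True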
def section(self, section_title): """ Plain text section content Args: section_title (str): Name of the section to pull Returns: str: The content of the section Note: Returns **None** if section title is not found; only text \ between title and next section or sub-section title is returned Note: Side effect is to also pull the content which can be slow Note: This is a parsing operation and not part of the standard API""" section = "== {0} ==".format(section_title) try: content = self.content index = content.index(section) + len(section) # ensure we have the full section header... while True: if content[index + 1] == "=": index += 1 else: break except ValueError: return None except IndexError: pass try: next_index = self.content.index("==", index) except ValueError: next_index = len(self.content) return self.content[index:next_index].lstrip("=").strip()
Plain text section content Args: section_title (str): Name of the section to pull Returns: str: The content of the section Note: Returns **None** if section title is not found; only text \ between title and next section or sub-section title is returned Note: Side effect is to also pull the content which can be slow Note: This is a parsing operation and not part of the standard API
Below is the the instruction that describes the task: ### Input: Plain text section content Args: section_title (str): Name of the section to pull Returns: str: The content of the section Note: Returns **None** if section title is not found; only text \ between title and next section or sub-section title is returned Note: Side effect is to also pull the content which can be slow Note: This is a parsing operation and not part of the standard API ### Response: def section(self, section_title): """ Plain text section content Args: section_title (str): Name of the section to pull Returns: str: The content of the section Note: Returns **None** if section title is not found; only text \ between title and next section or sub-section title is returned Note: Side effect is to also pull the content which can be slow Note: This is a parsing operation and not part of the standard API""" section = "== {0} ==".format(section_title) try: content = self.content index = content.index(section) + len(section) # ensure we have the full section header... while True: if content[index + 1] == "=": index += 1 else: break except ValueError: return None except IndexError: pass try: next_index = self.content.index("==", index) except ValueError: next_index = len(self.content) return self.content[index:next_index].lstrip("=").strip()
def process(self, sched, coro):
    """Add the given coroutine in the scheduler."""
    super(AddCoro, self).process(sched, coro)
    self.result = sched.add(self.coro, self.args, self.kwargs,
                            self.prio & priority.OP)
    if self.prio & priority.CORO:
        return self, coro
    else:
        sched.active.append((None, coro))
Add the given coroutine in the scheduler.
Below is the the instruction that describes the task: ### Input: Add the given coroutine in the scheduler. ### Response: def process(self, sched, coro): """Add the given coroutine in the scheduler.""" super(AddCoro, self).process(sched, coro) self.result = sched.add(self.coro, self.args, self.kwargs, self.prio & priority.OP) if self.prio & priority.CORO: return self, coro else: sched.active.append( (None, coro))
async def _dump_container_size(
    self, writer, container_len, container_type, params=None
):
    """
    Dumps container size - per element streaming

    :param writer:
    :param container_len:
    :param container_type:
    :param params:
    :return:
    """
    if not container_type or not container_type.FIX_SIZE:
        await dump_uvarint(writer, container_len)
    elif container_len != container_type.SIZE:
        raise ValueError(
            "Fixed size container has not defined size: %s" % container_type.SIZE
        )
Dumps container size - per element streaming :param writer: :param container_len: :param container_type: :param params: :return:
Below is the the instruction that describes the task: ### Input: Dumps container size - per element streaming :param writer: :param container_len: :param container_type: :param params: :return: ### Response: async def _dump_container_size( self, writer, container_len, container_type, params=None ): """ Dumps container size - per element streaming :param writer: :param container_len: :param container_type: :param params: :return: """ if not container_type or not container_type.FIX_SIZE: await dump_uvarint(writer, container_len) elif container_len != container_type.SIZE: raise ValueError( "Fixed size container has not defined size: %s" % container_type.SIZE )
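dump_uvarint is not defined in this record; assuming it performs a standard LEB128-style unsigned varint encoding (an assumption about that helper, not a fact taken from this snippet), the length prefix written for non-fixed-size containers would look like this:

def encode_uvarint(value):
    """Illustrative unsigned varint (LEB128) encoder -- 7 data bits per byte."""
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)   # continuation bit set
        else:
            out.append(byte)
            return bytes(out)


print(encode_uvarint(3).hex())     # '03'  -- small container lengths fit in one byte
print(encode_uvarint(300).hex())   # 'ac02'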
def vhel_to_vgsr(coordinate, vhel, vsun):
    """
    Convert a velocity from a heliocentric radial velocity to
    the Galactic standard of rest (GSR).

    Parameters
    ----------
    coordinate : :class:`~astropy.coordinates.SkyCoord`
        An Astropy SkyCoord object or anything object that can be passed
        to the SkyCoord initializer.
    vhel : :class:`~astropy.units.Quantity`
        Barycentric line-of-sight velocity.
    vsun : :class:`~astropy.units.Quantity`
        Full-space velocity of the sun in a Galactocentric frame. By default,
        uses the value assumed by Astropy in
        `~astropy.coordinates.Galactocentric`.

    Returns
    -------
    vgsr : :class:`~astropy.units.Quantity`
        Radial velocity in a galactocentric rest frame.
    """
    if vsun is None:
        vsun = coord.Galactocentric.galcen_v_sun.to_cartesian().xyz

    return vhel + _get_vproj(coordinate, vsun)
Convert a velocity from a heliocentric radial velocity to the Galactic standard of rest (GSR). Parameters ---------- coordinate : :class:`~astropy.coordinates.SkyCoord` An Astropy SkyCoord object or anything object that can be passed to the SkyCoord initializer. vhel : :class:`~astropy.units.Quantity` Barycentric line-of-sight velocity. vsun : :class:`~astropy.units.Quantity` Full-space velocity of the sun in a Galactocentric frame. By default, uses the value assumed by Astropy in `~astropy.coordinates.Galactocentric`. Returns ------- vgsr : :class:`~astropy.units.Quantity` Radial velocity in a galactocentric rest frame.
Below is the the instruction that describes the task: ### Input: Convert a velocity from a heliocentric radial velocity to the Galactic standard of rest (GSR). Parameters ---------- coordinate : :class:`~astropy.coordinates.SkyCoord` An Astropy SkyCoord object or anything object that can be passed to the SkyCoord initializer. vhel : :class:`~astropy.units.Quantity` Barycentric line-of-sight velocity. vsun : :class:`~astropy.units.Quantity` Full-space velocity of the sun in a Galactocentric frame. By default, uses the value assumed by Astropy in `~astropy.coordinates.Galactocentric`. Returns ------- vgsr : :class:`~astropy.units.Quantity` Radial velocity in a galactocentric rest frame. ### Response: def vhel_to_vgsr(coordinate, vhel, vsun): """ Convert a velocity from a heliocentric radial velocity to the Galactic standard of rest (GSR). Parameters ---------- coordinate : :class:`~astropy.coordinates.SkyCoord` An Astropy SkyCoord object or anything object that can be passed to the SkyCoord initializer. vhel : :class:`~astropy.units.Quantity` Barycentric line-of-sight velocity. vsun : :class:`~astropy.units.Quantity` Full-space velocity of the sun in a Galactocentric frame. By default, uses the value assumed by Astropy in `~astropy.coordinates.Galactocentric`. Returns ------- vgsr : :class:`~astropy.units.Quantity` Radial velocity in a galactocentric rest frame. """ if vsun is None: vsun = coord.Galactocentric.galcen_v_sun.to_cartesian().xyz return vhel + _get_vproj(coordinate, vsun)
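A hedged usage sketch: the Astropy objects below are real, but the import path of vhel_to_vgsr itself is not shown in this record, so the final call is left commented as an assumption.

import astropy.units as u
import astropy.coordinates as coord

target = coord.SkyCoord(ra=196.5 * u.deg, dec=-10.3 * u.deg, distance=16.2 * u.kpc)
vhel = 110.0 * u.km / u.s

# Same default the function falls back to when vsun is None:
vsun = coord.Galactocentric.galcen_v_sun.to_cartesian().xyz
print(vsun)

# vgsr = vhel_to_vgsr(target, vhel, vsun)   # assumes the function above is importable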
def comply(self, path):
    """Issues a chown and chmod to the file paths specified."""
    utils.ensure_permissions(path, self.user.pw_name,
                             self.group.gr_name, self.mode)
Issues a chown and chmod to the file paths specified.
Below is the the instruction that describes the task: ### Input: Issues a chown and chmod to the file paths specified. ### Response: def comply(self, path): """Issues a chown and chmod to the file paths specified.""" utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, self.mode)
def get_models(cls, index, as_class=False):
    '''
    Returns the list of models defined for this index.

    :param index: index name.
    :param as_class: set to True to return the model as a model object
        instead of as a string.
    '''
    try:
        return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys()
    except KeyError:
        raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string.
Below is the the instruction that describes the task: ### Input: Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string. ### Response: def get_models(cls, index, as_class=False): ''' Returns the list of models defined for this index. :param index: index name. :param as_class: set to True to return the model as a model object instead of as a string. ''' try: return cls._index_to_model[index] if as_class else cls._idx_name_to_mdl_to_mdlidx[index].keys() except KeyError: raise KeyError('Could not find any index named {}. Is this index defined in BUNGIESEARCH["INDICES"]?'.format(index))
def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30):
    """
    Returns a command that retrieves users by its email_prefix, ordered by email and by Group.
    If Group is None, only users without any group are going to be searched
    It returns a max number of users defined by page_size arg.
    Next result can be retrieved using cursor, in a next call.
    It is provided in cursor attribute from command.
    """
    email_prefix = email_prefix or ''
    return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group),
                              page_size,
                              cursor,
                              cache_begin=None)
Returns a command that retrieves users by their email_prefix, ordered by email and by Group. If Group is None, only users without any group are going to be searched. It returns at most the number of users given by the page_size arg. The next page of results can be retrieved using cursor in a subsequent call; the cursor is provided in the command's cursor attribute.
Below is the the instruction that describes the task: ### Input: Returns a command that retrieves users by its email_prefix, ordered by email and by Group. If Group is None, only users without any group are going to be searched It returns a max number of users defined by page_size arg. Next result can be retrieved using cursor, in a next call. It is provided in cursor attribute from command. ### Response: def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30): """ Returns a command that retrieves users by its email_prefix, ordered by email and by Group. If Group is None, only users without any group are going to be searched It returns a max number of users defined by page_size arg. Next result can be retrieved using cursor, in a next call. It is provided in cursor attribute from command. """ email_prefix = email_prefix or '' return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group), page_size, cursor, cache_begin=None)
def compare_dicts(i): """ Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters Note that if dict1 and dict2 has lists, the results will be as follows: * dict1={"key":['a','b','c']} dict2={"key":['a','b']} EQUAL * dict1={"key":['a','b']} dict2={"key":['a','b','c']} NOT EQUAL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal } """ d1=i.get('dict1',{}) d2=i.get('dict2',{}) equal='yes' bic=False ic=i.get('ignore_case','') if ic=='yes': bic=True for q2 in d2: v2=d2[q2] if type(v2)==dict: if q2 not in d1: equal='no' break v1=d1[q2] rx=compare_dicts({'dict1':v1,'dict2':v2, 'ignore_case':ic}) if rx['return']>0: return rx equal=rx['equal'] if equal=='no': break elif type(v2)==list: # For now can check only values in list if q2 not in d1: equal='no' break v1=d1[q2] if type(v1)!=list: equal='no' break for m in v2: if m not in v1: equal='no' break if equal=='no': break else: if q2 not in d1: equal='no' break if equal=='no': break v1=d1[q2] if bic and type(v1)!=int and type(v1)!=float and type(v1)!=bool: v1=v1.lower() v2=v2.lower() if v2!=v1: equal='no' break return {'return':0, 'equal':equal}
Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters Note that if dict1 and dict2 has lists, the results will be as follows: * dict1={"key":['a','b','c']} dict2={"key":['a','b']} EQUAL * dict1={"key":['a','b']} dict2={"key":['a','b','c']} NOT EQUAL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal }
Below is the the instruction that describes the task: ### Input: Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters Note that if dict1 and dict2 has lists, the results will be as follows: * dict1={"key":['a','b','c']} dict2={"key":['a','b']} EQUAL * dict1={"key":['a','b']} dict2={"key":['a','b','c']} NOT EQUAL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal } ### Response: def compare_dicts(i): """ Input: { dict1 - dictionary 1 dict2 - dictionary 2 (ignore_case) - ignore case of letters Note that if dict1 and dict2 has lists, the results will be as follows: * dict1={"key":['a','b','c']} dict2={"key":['a','b']} EQUAL * dict1={"key":['a','b']} dict2={"key":['a','b','c']} NOT EQUAL } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 equal - if 'yes' dictionaries are equal } """ d1=i.get('dict1',{}) d2=i.get('dict2',{}) equal='yes' bic=False ic=i.get('ignore_case','') if ic=='yes': bic=True for q2 in d2: v2=d2[q2] if type(v2)==dict: if q2 not in d1: equal='no' break v1=d1[q2] rx=compare_dicts({'dict1':v1,'dict2':v2, 'ignore_case':ic}) if rx['return']>0: return rx equal=rx['equal'] if equal=='no': break elif type(v2)==list: # For now can check only values in list if q2 not in d1: equal='no' break v1=d1[q2] if type(v1)!=list: equal='no' break for m in v2: if m not in v1: equal='no' break if equal=='no': break else: if q2 not in d1: equal='no' break if equal=='no': break v1=d1[q2] if bic and type(v1)!=int and type(v1)!=float and type(v1)!=bool: v1=v1.lower() v2=v2.lower() if v2!=v1: equal='no' break return {'return':0, 'equal':equal}
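Assuming compare_dicts above is defined in scope, a quick check of the list-comparison asymmetry the docstring calls out:

d_super = {"key": ['a', 'b', 'c']}
d_sub = {"key": ['a', 'b']}

# Every list item of dict2 must appear in dict1's list, not the other way around.
print(compare_dicts({'dict1': d_super, 'dict2': d_sub}))   # {'return': 0, 'equal': 'yes'}
print(compare_dicts({'dict1': d_sub, 'dict2': d_super}))   # {'return': 0, 'equal': 'no'}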
def get_qual_range(qual_str):
    """
    Get range of the Unicode encode range for a given string of characters.
    The encoding is determined from the result of the :py:func:`ord` built-in.

    Parameters
    ----------
    qual_str : str
        Arbitrary string.

    Returns
    -------
    x : tuple
        (Minimum Unicode code, Maximum Unicode code).
    """
    vals = [ord(c) for c in qual_str]
    return min(vals), max(vals)
Get range of the Unicode encode range for a given string of characters. The encoding is determined from the result of the :py:func:`ord` built-in. Parameters ---------- qual_str : str Arbitrary string. Returns ------- x : tuple (Minimum Unicode code, Maximum Unicode code).
Below is the the instruction that describes the task: ### Input: Get range of the Unicode encode range for a given string of characters. The encoding is determined from the result of the :py:func:`ord` built-in. Parameters ---------- qual_str : str Arbitrary string. Returns ------- x : tuple (Minimum Unicode code, Maximum Unicode code). ### Response: def get_qual_range(qual_str): """ Get range of the Unicode encode range for a given string of characters. The encoding is determined from the result of the :py:func:`ord` built-in. Parameters ---------- qual_str : str Arbitrary string. Returns ------- x : tuple (Minimum Unicode code, Maximum Unicode code). """ vals = [ord(c) for c in qual_str] return min(vals), max(vals)
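get_qual_range is pure standard library, so once defined it can be exercised directly, e.g. on a FASTQ-style quality string:

print(get_qual_range("IIIHGF"))   # (70, 73)  -- ord('F') == 70, ord('I') == 73
print(get_qual_range("!~"))       # (33, 126) -- the full printable ASCII span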
def get_r_df(self):
    ''' getter '''
    if isinstance(self.__r_df, pd.DataFrame) is False and self.__r_df is not None:
        raise TypeError("The type of `__r_df` must be `pd.DataFrame`.")
    return self.__r_df
getter
Below is the the instruction that describes the task: ### Input: getter ### Response: def get_r_df(self): ''' getter ''' if isinstance(self.__r_df, pd.DataFrame) is False and self.__r_df is not None: raise TypeError("The type of `__r_df` must be `pd.DataFrame`.") return self.__r_df
def setattr(self, name, value):
    """Set an attribute to a new value for all Parameters.

    For example, set grad_req to null if you don't need gradient w.r.t a
    model's Parameters::

        model.collect_params().setattr('grad_req', 'null')

    or change the learning rate multiplier::

        model.collect_params().setattr('lr_mult', 0.5)

    Parameters
    ----------
    name : str
        Name of the attribute.
    value : valid type for attribute name
        The new value for the attribute.
    """
    for i in self.values():
        setattr(i, name, value)
Set an attribute to a new value for all Parameters. For example, set grad_req to null if you don't need gradient w.r.t a model's Parameters:: model.collect_params().setattr('grad_req', 'null') or change the learning rate multiplier:: model.collect_params().setattr('lr_mult', 0.5) Parameters ---------- name : str Name of the attribute. value : valid type for attribute name The new value for the attribute.
Below is the the instruction that describes the task: ### Input: Set an attribute to a new value for all Parameters. For example, set grad_req to null if you don't need gradient w.r.t a model's Parameters:: model.collect_params().setattr('grad_req', 'null') or change the learning rate multiplier:: model.collect_params().setattr('lr_mult', 0.5) Parameters ---------- name : str Name of the attribute. value : valid type for attribute name The new value for the attribute. ### Response: def setattr(self, name, value): """Set an attribute to a new value for all Parameters. For example, set grad_req to null if you don't need gradient w.r.t a model's Parameters:: model.collect_params().setattr('grad_req', 'null') or change the learning rate multiplier:: model.collect_params().setattr('lr_mult', 0.5) Parameters ---------- name : str Name of the attribute. value : valid type for attribute name The new value for the attribute. """ for i in self.values(): setattr(i, name, value)
def _run_qmc(self, boot): """ Runs quartet max-cut QMC on the quartets qdump file. """ ## build command self._tmp = os.path.join(self.dirs, ".tmptre") cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp] ## run it proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) res = proc.communicate() if proc.returncode: raise IPyradWarningExit(res[1]) ## parse tmp file written by qmc into a tree and rename it with open(self._tmp, 'r') as intree: tre = ete3.Tree(intree.read().strip()) names = tre.get_leaves() for name in names: name.name = self.samples[int(name.name)] tmptre = tre.write(format=9) ## save the tree to file if boot: self.trees.boots = os.path.join(self.dirs, self.name+".boots") with open(self.trees.boots, 'a') as outboot: outboot.write(tmptre+"\n") else: self.trees.tree = os.path.join(self.dirs, self.name+".tree") with open(self.trees.tree, 'w') as outtree: outtree.write(tmptre) ## save the file self._save()
Runs quartet max-cut QMC on the quartets qdump file.
Below is the the instruction that describes the task: ### Input: Runs quartet max-cut QMC on the quartets qdump file. ### Response: def _run_qmc(self, boot): """ Runs quartet max-cut QMC on the quartets qdump file. """ ## build command self._tmp = os.path.join(self.dirs, ".tmptre") cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp] ## run it proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE) res = proc.communicate() if proc.returncode: raise IPyradWarningExit(res[1]) ## parse tmp file written by qmc into a tree and rename it with open(self._tmp, 'r') as intree: tre = ete3.Tree(intree.read().strip()) names = tre.get_leaves() for name in names: name.name = self.samples[int(name.name)] tmptre = tre.write(format=9) ## save the tree to file if boot: self.trees.boots = os.path.join(self.dirs, self.name+".boots") with open(self.trees.boots, 'a') as outboot: outboot.write(tmptre+"\n") else: self.trees.tree = os.path.join(self.dirs, self.name+".tree") with open(self.trees.tree, 'w') as outtree: outtree.write(tmptre) ## save the file self._save()
def display_name(self):
    """Readable name for the variant."""
    if self.is_snv:
        gene_ids = self.gene_symbols[:2]
        return ', '.join(gene_ids)
    else:
        return "{this.cytoband_start} ({this.sv_len})".format(this=self)
Readable name for the variant.
Below is the the instruction that describes the task: ### Input: Readable name for the variant. ### Response: def display_name(self): """Readable name for the variant.""" if self.is_snv: gene_ids = self.gene_symbols[:2] return ', '.join(gene_ids) else: return "{this.cytoband_start} ({this.sv_len})".format(this=self)
def calc_shape_statistics(self, stat_names):
    """
    Calculate shape statistics using regionprops applied to the object mask.

    Args:
        stat_names: List of statistics to be extracted from those calculated
            by regionprops.

    Returns:
        Dictionary of shape statistics
    """
    stats = {}
    try:
        all_props = [regionprops(m) for m in self.masks]
    except TypeError:
        print(self.masks)
        exit()
    for stat in stat_names:
        stats[stat] = np.mean([p[0][stat] for p in all_props])
    return stats
Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics
Below is the the instruction that describes the task: ### Input: Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics ### Response: def calc_shape_statistics(self, stat_names): """ Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics """ stats = {} try: all_props = [regionprops(m) for m in self.masks] except TypeError: print(self.masks) exit() for stat in stat_names: stats[stat] = np.mean([p[0][stat] for p in all_props]) return stats
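The per-mask work delegated to regionprops can be reproduced standalone with scikit-image; a small runnable sketch, assuming each mask is an integer label image (the original self.masks is not shown here):

import numpy as np
from skimage.measure import regionprops

mask = np.zeros((8, 8), dtype=int)
mask[2:6, 2:6] = 1                     # a single 4x4 square labelled "1"

props = regionprops(mask)[0]
stats = {name: float(props[name]) for name in ("area", "eccentricity")}
print(stats)                           # {'area': 16.0, 'eccentricity': 0.0}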
def lookAtSpheroid(lat0: float, lon0: float, h0: float, az: float, tilt: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]: """ Calculates line-of-sight intersection with Earth (or other ellipsoid) surface from above surface / orbit Parameters ---------- lat0 : float observer geodetic latitude lon0 : float observer geodetic longitude h0 : float observer altitude (meters) Must be non-negative since this function doesn't consider terrain az : float or numpy.ndarray of float azimuth angle of line-of-sight, clockwise from North tilt : float or numpy.ndarray of float tilt angle of line-of-sight with respect to local vertical (nadir = 0) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- lat0 : float or numpy.ndarray of float geodetic latitude where the line-of-sight intersects with the Earth ellipsoid lon0 : float or numpy.ndarray of float geodetic longitude where the line-of-sight intersects with the Earth ellipsoid d : float or numpy.ndarray of float slant range (meters) from starting point to intersect point Values will be NaN if the line of sight does not intersect. Algorithm based on https://medium.com/@stephenhartzell/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6 Stephen Hartzell """ if (np.asarray(h0) < 0).any(): raise ValueError('Intersection calculation requires altitude [0, Infinity)') if ell is None: ell = Ellipsoid() tilt = np.asarray(tilt) a = ell.a b = ell.a c = ell.b el = tilt - 90. if deg else tilt - pi / 2 e, n, u = aer2enu(az, el, srange=1., deg=deg) # fixed 1 km slant range u, v, w = enu2uvw(e, n, u, lat0, lon0, deg=deg) x, y, z = geodetic2ecef(lat0, lon0, h0, deg=deg) value = -a**2 * b**2 * w * z - a**2 * c**2 * v * y - b**2 * c**2 * u * x radical = (a**2 * b**2 * w**2 + a**2 * c**2 * v**2 - a**2 * v**2 * z**2 + 2 * a**2 * v * w * y * z - a**2 * w**2 * y**2 + b**2 * c**2 * u**2 - b**2 * u**2 * z**2 + 2 * b**2 * u * w * x * z - b**2 * w**2 * x**2 - c**2 * u**2 * y**2 + 2 * c**2 * u * v * x * y - c**2 * v**2 * x**2) magnitude = a**2 * b**2 * w**2 + a**2 * c**2 * v**2 + b**2 * c**2 * u**2 # %% Return nan if radical < 0 or d < 0 because LOS vector does not point towards Earth with np.errstate(invalid='ignore'): d = np.where(radical > 0, (value - a * b * c * np.sqrt(radical)) / magnitude, np.nan) d[d < 0] = np.nan # %% cartesian to ellipsodal lat, lon, _ = ecef2geodetic(x + d * u, y + d * v, z + d * w, deg=deg) return lat, lon, d
Calculates line-of-sight intersection with Earth (or other ellipsoid) surface from above surface / orbit Parameters ---------- lat0 : float observer geodetic latitude lon0 : float observer geodetic longitude h0 : float observer altitude (meters) Must be non-negative since this function doesn't consider terrain az : float or numpy.ndarray of float azimuth angle of line-of-sight, clockwise from North tilt : float or numpy.ndarray of float tilt angle of line-of-sight with respect to local vertical (nadir = 0) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- lat0 : float or numpy.ndarray of float geodetic latitude where the line-of-sight intersects with the Earth ellipsoid lon0 : float or numpy.ndarray of float geodetic longitude where the line-of-sight intersects with the Earth ellipsoid d : float or numpy.ndarray of float slant range (meters) from starting point to intersect point Values will be NaN if the line of sight does not intersect. Algorithm based on https://medium.com/@stephenhartzell/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6 Stephen Hartzell
Below is the the instruction that describes the task: ### Input: Calculates line-of-sight intersection with Earth (or other ellipsoid) surface from above surface / orbit Parameters ---------- lat0 : float observer geodetic latitude lon0 : float observer geodetic longitude h0 : float observer altitude (meters) Must be non-negative since this function doesn't consider terrain az : float or numpy.ndarray of float azimuth angle of line-of-sight, clockwise from North tilt : float or numpy.ndarray of float tilt angle of line-of-sight with respect to local vertical (nadir = 0) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- lat0 : float or numpy.ndarray of float geodetic latitude where the line-of-sight intersects with the Earth ellipsoid lon0 : float or numpy.ndarray of float geodetic longitude where the line-of-sight intersects with the Earth ellipsoid d : float or numpy.ndarray of float slant range (meters) from starting point to intersect point Values will be NaN if the line of sight does not intersect. Algorithm based on https://medium.com/@stephenhartzell/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6 Stephen Hartzell ### Response: def lookAtSpheroid(lat0: float, lon0: float, h0: float, az: float, tilt: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]: """ Calculates line-of-sight intersection with Earth (or other ellipsoid) surface from above surface / orbit Parameters ---------- lat0 : float observer geodetic latitude lon0 : float observer geodetic longitude h0 : float observer altitude (meters) Must be non-negative since this function doesn't consider terrain az : float or numpy.ndarray of float azimuth angle of line-of-sight, clockwise from North tilt : float or numpy.ndarray of float tilt angle of line-of-sight with respect to local vertical (nadir = 0) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- lat0 : float or numpy.ndarray of float geodetic latitude where the line-of-sight intersects with the Earth ellipsoid lon0 : float or numpy.ndarray of float geodetic longitude where the line-of-sight intersects with the Earth ellipsoid d : float or numpy.ndarray of float slant range (meters) from starting point to intersect point Values will be NaN if the line of sight does not intersect. Algorithm based on https://medium.com/@stephenhartzell/satellite-line-of-sight-intersection-with-earth-d786b4a6a9b6 Stephen Hartzell """ if (np.asarray(h0) < 0).any(): raise ValueError('Intersection calculation requires altitude [0, Infinity)') if ell is None: ell = Ellipsoid() tilt = np.asarray(tilt) a = ell.a b = ell.a c = ell.b el = tilt - 90. 
if deg else tilt - pi / 2 e, n, u = aer2enu(az, el, srange=1., deg=deg) # fixed 1 km slant range u, v, w = enu2uvw(e, n, u, lat0, lon0, deg=deg) x, y, z = geodetic2ecef(lat0, lon0, h0, deg=deg) value = -a**2 * b**2 * w * z - a**2 * c**2 * v * y - b**2 * c**2 * u * x radical = (a**2 * b**2 * w**2 + a**2 * c**2 * v**2 - a**2 * v**2 * z**2 + 2 * a**2 * v * w * y * z - a**2 * w**2 * y**2 + b**2 * c**2 * u**2 - b**2 * u**2 * z**2 + 2 * b**2 * u * w * x * z - b**2 * w**2 * x**2 - c**2 * u**2 * y**2 + 2 * c**2 * u * v * x * y - c**2 * v**2 * x**2) magnitude = a**2 * b**2 * w**2 + a**2 * c**2 * v**2 + b**2 * c**2 * u**2 # %% Return nan if radical < 0 or d < 0 because LOS vector does not point towards Earth with np.errstate(invalid='ignore'): d = np.where(radical > 0, (value - a * b * c * np.sqrt(radical)) / magnitude, np.nan) d[d < 0] = np.nan # %% cartesian to ellipsodal lat, lon, _ = ecef2geodetic(x + d * u, y + d * v, z + d * w, deg=deg) return lat, lon, d
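A usage sketch taken straight from the signature above; the import line is an assumption, since the module path is not part of this record:

# from <module> import lookAtSpheroid   # import path not shown in this record

# Sensor 500 km above (lat 10 deg, lon 20 deg), looking 30 deg east of north,
# tilted 15 deg away from nadir:
lat, lon, srange = lookAtSpheroid(10.0, 20.0, 500e3, az=30.0, tilt=15.0)
print(lat, lon, srange)   # NaN results mean the line of sight misses the ellipsoid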
def set_association_id(self, assoc_id=None):
    """
    This will set the association ID based on the internal parts
    of the association.
    To be used in cases where an external association identifier
    should be used.
    :param assoc_id:
    :return:
    """
    if assoc_id is None:
        self.assoc_id = self.make_association_id(
            self.definedby, self.sub, self.rel, self.obj)
    else:
        self.assoc_id = assoc_id

    return self.assoc_id
This will set the association ID based on the internal parts of the association. To be used in cases where an external association identifier should be used. :param assoc_id: :return:
Below is the the instruction that describes the task: ### Input: This will set the association ID based on the internal parts of the association. To be used in cases where an external association identifier should be used. :param assoc_id: :return: ### Response: def set_association_id(self, assoc_id=None): """ This will set the association ID based on the internal parts of the association. To be used in cases where an external association identifier should be used. :param assoc_id: :return: """ if assoc_id is None: self.assoc_id = self.make_association_id( self.definedby, self.sub, self.rel, self.obj) else: self.assoc_id = assoc_id return self.assoc_id
def bookmark(snapshot, bookmark): ''' Creates a bookmark of the given snapshot .. note:: Bookmarks mark the point in time when the snapshot was created, and can be used as the incremental source for a zfs send command. This feature must be enabled to be used. See zpool-features(5) for details on ZFS feature flags and the bookmarks feature. snapshot : string name of snapshot to bookmark bookmark : string name of bookmark .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete ''' # abort if we do not have feature flags if not __utils__['zfs.has_feature_flags'](): return OrderedDict([('error', 'bookmarks are not supported')]) ## Configure command # NOTE: initialize the defaults target = [] # NOTE: update target target.append(snapshot) target.append(bookmark) ## Bookmark snapshot res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='bookmark', target=target, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'bookmarked')
Creates a bookmark of the given snapshot .. note:: Bookmarks mark the point in time when the snapshot was created, and can be used as the incremental source for a zfs send command. This feature must be enabled to be used. See zpool-features(5) for details on ZFS feature flags and the bookmarks feature. snapshot : string name of snapshot to bookmark bookmark : string name of bookmark .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete
Below is the the instruction that describes the task: ### Input: Creates a bookmark of the given snapshot .. note:: Bookmarks mark the point in time when the snapshot was created, and can be used as the incremental source for a zfs send command. This feature must be enabled to be used. See zpool-features(5) for details on ZFS feature flags and the bookmarks feature. snapshot : string name of snapshot to bookmark bookmark : string name of bookmark .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete ### Response: def bookmark(snapshot, bookmark): ''' Creates a bookmark of the given snapshot .. note:: Bookmarks mark the point in time when the snapshot was created, and can be used as the incremental source for a zfs send command. This feature must be enabled to be used. See zpool-features(5) for details on ZFS feature flags and the bookmarks feature. snapshot : string name of snapshot to bookmark bookmark : string name of bookmark .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.bookmark myzpool/mydataset@yesterday myzpool/mydataset#complete ''' # abort if we do not have feature flags if not __utils__['zfs.has_feature_flags'](): return OrderedDict([('error', 'bookmarks are not supported')]) ## Configure command # NOTE: initialize the defaults target = [] # NOTE: update target target.append(snapshot) target.append(bookmark) ## Bookmark snapshot res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='bookmark', target=target, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'bookmarked')
def construct_auth (self):
    """Construct HTTP Basic authentication credentials if there
    is user/password information available. Does not overwrite if
    credentials have already been constructed."""
    if self.auth:
        return
    _user, _password = self.get_user_password()
    if _user is not None and _password is not None:
        self.auth = (_user, _password)
Construct HTTP Basic authentication credentials if there is user/password information available. Does not overwrite if credentials have already been constructed.
Below is the the instruction that describes the task: ### Input: Construct HTTP Basic authentication credentials if there is user/password information available. Does not overwrite if credentials have already been constructed. ### Response: def construct_auth (self): """Construct HTTP Basic authentication credentials if there is user/password information available. Does not overwrite if credentials have already been constructed.""" if self.auth: return _user, _password = self.get_user_password() if _user is not None and _password is not None: self.auth = (_user, _password)
def workflow_set_stage_inputs(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /workflow-xxxx/setStageInputs API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FsetStageInputs
    """
    return DXHTTPRequest('/%s/setStageInputs' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /workflow-xxxx/setStageInputs API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FsetStageInputs
Below is the the instruction that describes the task: ### Input: Invokes the /workflow-xxxx/setStageInputs API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FsetStageInputs ### Response: def workflow_set_stage_inputs(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /workflow-xxxx/setStageInputs API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Workflows-and-Analyses#API-method%3A-%2Fworkflow-xxxx%2FsetStageInputs """ return DXHTTPRequest('/%s/setStageInputs' % object_id, input_params, always_retry=always_retry, **kwargs)
def log_pdf(self, y, mu, weights=None):
    """
    computes the log of the pdf or pmf of the values under the current distribution

    Parameters
    ----------
    y : array-like of length n
        target values
    mu : array-like of length n
        expected values
    weights : array-like shape (n,) or None, default: None
        containing sample weights
        if None, defaults to array of ones

    Returns
    -------
    pdf/pmf : np.array of length n
    """
    if weights is None:
        weights = np.ones_like(mu)
    gamma = weights / self.scale
    return sp.stats.invgauss.logpdf(y, mu, scale=1./gamma)
computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n
Below is the the instruction that describes the task: ### Input: computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n ### Response: def log_pdf(self, y, mu, weights=None): """ computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None containing sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n """ if weights is None: weights = np.ones_like(mu) gamma = weights / self.scale return sp.stats.invgauss.logpdf(y, mu, scale=1./gamma)
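The body of log_pdf reduces to a single SciPy call; a standalone sketch with an illustrative scale value standing in for self.scale:

import numpy as np
from scipy import stats

y = np.array([0.8, 1.1, 1.4])          # observed values
mu = np.ones_like(y)                   # expected values
weights = np.ones_like(y)
scale = 2.0                            # plays the role of self.scale

gamma = weights / scale
print(stats.invgauss.logpdf(y, mu, scale=1. / gamma))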
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
    """Initialize parameters in the weight space."""
    prec = np.zeros((n_items, n_items))
    xs = np.zeros(n_items)
    for i, (a, b) in enumerate(comparisons):
        prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT
        xs[a] += nu[i]
        xs[b] -= nu[i]
    cov = inv_posdef(prior_inv + prec)
    mean = cov.dot(xs)
    return mean, cov, xs, prec
Initialize parameters in the weight space.
Below is the the instruction that describes the task: ### Input: Initialize parameters in the weight space. ### Response: def _init_ws(n_items, comparisons, prior_inv, tau, nu): """Initialize parameters in the weight space.""" prec = np.zeros((n_items, n_items)) xs = np.zeros(n_items) for i, (a, b) in enumerate(comparisons): prec[(a, a, b, b), (a, b, a, b)] += tau[i] * MAT_ONE_FLAT xs[a] += nu[i] xs[b] -= nu[i] cov = inv_posdef(prior_inv + prec) mean = cov.dot(xs) return mean, cov, xs , prec
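The closing lines of _init_ws are a standard Gaussian posterior update; a runnable sketch using numpy.linalg.inv in place of the module's inv_posdef helper (that substitution is an assumption -- the original helper is not shown):

import numpy as np

n_items = 3
prior_inv = np.eye(n_items)                       # prior precision
prec = np.array([[1., -1., 0.],
                 [-1., 2., -1.],
                 [0., -1., 1.]])                  # accumulated pairwise precision
xs = np.array([0.5, 0.0, -0.5])                   # accumulated natural parameters

cov = np.linalg.inv(prior_inv + prec)             # posterior covariance
mean = cov.dot(xs)                                # posterior mean
print(mean)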
def flatten_list(list_):
    """
    Banana banana
    """
    res = []
    for elem in list_:
        if isinstance(elem, list):
            res.extend(flatten_list(elem))
        else:
            res.append(elem)
    return res
Banana banana
Below is the the instruction that describes the task: ### Input: Banana banana ### Response: def flatten_list(list_): """ Banana banana """ res = [] for elem in list_: if isinstance(elem, list): res.extend(flatten_list(elem)) else: res.append(elem) return res
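With flatten_list defined as above, nesting of any depth collapses to a single flat list:

print(flatten_list([1, [2, [3, [4]]], 5]))   # [1, 2, 3, 4, 5]
print(flatten_list([]))                      # []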
def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1): """ Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void """ with ScratchDir('.'): name = "temp_zeo" zeo_inp_filename = name + ".cssr" ZeoCssr(structure).write_file(zeo_inp_filename) rad_file = None if rad_dict: rad_file = name + ".rad" with open(rad_file, 'w') as fp: for el in rad_dict.keys(): fp.write("{0} {1}".format(el, rad_dict[el])) atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file) vol_str = volume(atmnet, 0.3, probe_rad, 10000) sa_str = surface_area(atmnet, 0.3, probe_rad, 10000) vol = None sa = None for line in vol_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: vol = -1.0 break if float(fields[1]) == 0: vol = -1.0 break vol = float(fields[3]) for line in sa_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: # raise ValueError("Too many voids") sa = -1.0 break if float(fields[1]) == 0: sa = -1.0 break sa = float(fields[3]) if not vol or not sa: raise ValueError("Error in zeo++ output stream") return vol, sa
Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void
Below is the the instruction that describes the task: ### Input: Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void ### Response: def get_void_volume_surfarea(structure, rad_dict=None, chan_rad=0.3, probe_rad=0.1): """ Computes the volume and surface area of isolated void using Zeo++. Useful to compute the volume and surface area of vacant site. Args: structure: pymatgen Structure containing vacancy rad_dict(optional): Dictionary with short name of elements and their radii. chan_rad(optional): Minimum channel Radius. probe_rad(optional): Probe radius for Monte Carlo sampling. Returns: volume: floating number representing the volume of void """ with ScratchDir('.'): name = "temp_zeo" zeo_inp_filename = name + ".cssr" ZeoCssr(structure).write_file(zeo_inp_filename) rad_file = None if rad_dict: rad_file = name + ".rad" with open(rad_file, 'w') as fp: for el in rad_dict.keys(): fp.write("{0} {1}".format(el, rad_dict[el])) atmnet = AtomNetwork.read_from_CSSR(zeo_inp_filename, True, rad_file) vol_str = volume(atmnet, 0.3, probe_rad, 10000) sa_str = surface_area(atmnet, 0.3, probe_rad, 10000) vol = None sa = None for line in vol_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: vol = -1.0 break if float(fields[1]) == 0: vol = -1.0 break vol = float(fields[3]) for line in sa_str.split("\n"): if "Number_of_pockets" in line: fields = line.split() if float(fields[1]) > 1: # raise ValueError("Too many voids") sa = -1.0 break if float(fields[1]) == 0: sa = -1.0 break sa = float(fields[3]) if not vol or not sa: raise ValueError("Error in zeo++ output stream") return vol, sa
def next_history(self, current):  # (C-n)
    u'''Move forward through the history list, fetching the next command. '''
    if self.history_cursor < len(self.history) - 1:
        self.history_cursor += 1
        current.set_line(self.history[self.history_cursor].get_line_text())
u'''Move forward through the history list, fetching the next command.
Below is the the instruction that describes the task: ### Input: u'''Move forward through the history list, fetching the next command. ### Response: def next_history(self, current): # (C-n) u'''Move forward through the history list, fetching the next command. ''' if self.history_cursor < len(self.history) - 1: self.history_cursor += 1 current.set_line(self.history[self.history_cursor].get_line_text())
def truncate(self, n):
    """Erase [n] elements."""
    # Current byte position - (n * data_size)
    size = self.size - n * self.__strct.size
    # Erase [size] bytes from file.tell()
    self.__file.truncate(size)
Erase [n] elements.
Below is the the instruction that describes the task: ### Input: Erase [n] elements. ### Response: def truncate(self, n): """Erase [n] elements.""" # Current byte position - (n * data_size) size = self.size - n * self.__strct.size # Erase [size] bytes from file.tell() self.__file.truncate(size)
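The size arithmetic hinges on struct.Struct.size giving the fixed byte width of one element; a standalone illustration (the file handle and struct belong to the surrounding class, so the values here are invented):

import struct

record = struct.Struct('<d')      # one little-endian double per element
file_size = 80                    # e.g. 10 stored elements
n = 3                             # elements to erase

new_size = file_size - n * record.size
print(record.size, new_size)      # 8 56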
def parse_data(self, text, maxwidth, maxheight, template_dir, context, urlize_all_links): """ Parses a block of text rendering links that occur on their own line normally but rendering inline links using a special template dir """ block_parser = TextBlockParser() lines = text.splitlines() parsed = [] for line in lines: if STANDALONE_URL_RE.match(line): user_url = line.strip() try: resource = oembed.site.embed(user_url, maxwidth=maxwidth, maxheight=maxheight) context['minwidth'] = min(maxwidth, resource.width) context['minheight'] = min(maxheight, resource.height) except OEmbedException: if urlize_all_links: line = '<a href="%(LINK)s">%(LINK)s</a>' % {'LINK': user_url} else: context['minwidth'] = min(maxwidth, resource.width) context['minheight'] = min(maxheight, resource.height) line = self.render_oembed( resource, user_url, template_dir=template_dir, context=context) else: line = block_parser.parse(line, maxwidth, maxheight, 'inline', context, urlize_all_links) parsed.append(line) return mark_safe('\n'.join(parsed))
Parses a block of text rendering links that occur on their own line normally but rendering inline links using a special template dir
Below is the the instruction that describes the task: ### Input: Parses a block of text rendering links that occur on their own line normally but rendering inline links using a special template dir ### Response: def parse_data(self, text, maxwidth, maxheight, template_dir, context, urlize_all_links): """ Parses a block of text rendering links that occur on their own line normally but rendering inline links using a special template dir """ block_parser = TextBlockParser() lines = text.splitlines() parsed = [] for line in lines: if STANDALONE_URL_RE.match(line): user_url = line.strip() try: resource = oembed.site.embed(user_url, maxwidth=maxwidth, maxheight=maxheight) context['minwidth'] = min(maxwidth, resource.width) context['minheight'] = min(maxheight, resource.height) except OEmbedException: if urlize_all_links: line = '<a href="%(LINK)s">%(LINK)s</a>' % {'LINK': user_url} else: context['minwidth'] = min(maxwidth, resource.width) context['minheight'] = min(maxheight, resource.height) line = self.render_oembed( resource, user_url, template_dir=template_dir, context=context) else: line = block_parser.parse(line, maxwidth, maxheight, 'inline', context, urlize_all_links) parsed.append(line) return mark_safe('\n'.join(parsed))
def convert_image_to_knitting_pattern(path, colors=("white", "black")): """Load a image file such as a png bitmap of jpeg file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loeder. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json") """ image = PIL.Image.open(path) pattern_id = os.path.splitext(os.path.basename(path))[0] rows = [] connections = [] pattern_set = { "version": "0.1", "type": "knitting pattern", "comment": { "source": path }, "patterns": [ { "name": pattern_id, "id": pattern_id, "rows": rows, "connections": connections } ]} bbox = image.getbbox() if not bbox: return pattern_set white = image.getpixel((0, 0)) min_x, min_y, max_x, max_y = bbox last_row_y = None for y in reversed(range(min_y, max_y)): instructions = [] row = {"id": y, "instructions": instructions} rows.append(row) for x in range(min_x, max_x): if image.getpixel((x, y)) == white: color = colors[0] else: color = colors[1] instruction = {"color": color} instructions.append(instruction) if last_row_y is not None: connections.append({"from": {"id": last_row_y}, "to": {"id": y}}) last_row_y = y return pattern_set
Load an image file such as a PNG bitmap or JPEG file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loader. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json")
Below is the the instruction that describes the task: ### Input: Load a image file such as a png bitmap of jpeg file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loeder. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json") ### Response: def convert_image_to_knitting_pattern(path, colors=("white", "black")): """Load a image file such as a png bitmap of jpeg file and convert it to a :ref:`knitting pattern file <FileFormatSpecification>`. :param list colors: a list of strings that should be used as :ref:`colors <png-color>`. :param str path: ignore this. It is fulfilled by the loeder. Example: .. code:: python convert_image_to_knitting_pattern().path("image.png").path("image.json") """ image = PIL.Image.open(path) pattern_id = os.path.splitext(os.path.basename(path))[0] rows = [] connections = [] pattern_set = { "version": "0.1", "type": "knitting pattern", "comment": { "source": path }, "patterns": [ { "name": pattern_id, "id": pattern_id, "rows": rows, "connections": connections } ]} bbox = image.getbbox() if not bbox: return pattern_set white = image.getpixel((0, 0)) min_x, min_y, max_x, max_y = bbox last_row_y = None for y in reversed(range(min_y, max_y)): instructions = [] row = {"id": y, "instructions": instructions} rows.append(row) for x in range(min_x, max_x): if image.getpixel((x, y)) == white: color = colors[0] else: color = colors[1] instruction = {"color": color} instructions.append(instruction) if last_row_y is not None: connections.append({"from": {"id": last_row_y}, "to": {"id": y}}) last_row_y = y return pattern_set
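A runnable sketch that builds a tiny two-colour bitmap with Pillow and feeds it to the converter defined above; the file name is arbitrary and the function itself is assumed to be importable:

import PIL.Image

img = PIL.Image.new("L", (4, 2), 255)   # white background
img.putpixel((1, 0), 0)                 # two black "stitches"
img.putpixel((2, 1), 0)
img.save("tiny.png")

pattern_set = convert_image_to_knitting_pattern("tiny.png")
print(len(pattern_set["patterns"][0]["rows"]))   # 2 -- one row per bitmap line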
def get_queryset(self, *args, **kwargs):
    """
    Ensures that this manager always returns nodes in tree order.
    """
    qs = super(TreeManager, self).get_queryset(*args, **kwargs)
    # Restrict operations to pages on the current site if needed
    if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID:
        return qs.order_by(self.tree_id_attr, self.left_attr).filter(sites=settings.SITE_ID)
    else:
        return qs.order_by(self.tree_id_attr, self.left_attr)
Ensures that this manager always returns nodes in tree order.
Below is the the instruction that describes the task: ### Input: Ensures that this manager always returns nodes in tree order. ### Response: def get_queryset(self, *args, **kwargs): """ Ensures that this manager always returns nodes in tree order. """ qs = super(TreeManager, self).get_queryset(*args, **kwargs) # Restrict operations to pages on the current site if needed if settings.PAGES_HIDE_SITES and settings.PAGES_USE_SITE_ID: return qs.order_by(self.tree_id_attr, self.left_attr).filter(sites=settings.SITE_ID) else: return qs.order_by(self.tree_id_attr, self.left_attr)
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    o = np.array(center)
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    vals = []
    for i in range(len(n) - 1):
        v = -np.dot(n[i], n[i + 1]) \
            / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
        vals.append(acos(abs_cap(v)))
    phi = sum(vals)
    return phi + (3 - len(r)) * pi
Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle.
Below is the the instruction that describes the task: ### Input: Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle. ### Response: def solid_angle(center, coords): """ Helper method to calculate the solid angle of a set of coords from the center. Args: center (3x1 array): Center to measure solid angle from. coords (Nx3 array): List of coords to determine solid angle. Returns: The solid angle. """ o = np.array(center) r = [np.array(c) - o for c in coords] r.append(r[0]) n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)] n.append(np.cross(r[1], r[0])) vals = [] for i in range(len(n) - 1): v = -np.dot(n[i], n[i + 1]) \ / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])) vals.append(acos(abs_cap(v))) phi = sum(vals) return phi + (3 - len(r)) * pi
def content_upload(self, key, model, contentid, data, mimetype):
    """Store the given data as a result of a query for content id given
    the model.

    This method maps to
    https://github.com/exosite/docs/tree/master/provision#post---upload-content

    Args:
        key: The CIK or Token for the device
        model:
        contentid: The ID used to name the entity bucket
        data: The data blob to save
        mimetype: The Content-Type to use when serving the blob later
    """
    headers = {"Content-Type": mimetype}
    path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
    return self._request(path, key, data, 'POST', self._manage_by_cik, headers)
Store the given data as a result of a query for content id given the model. This method maps to https://github.com/exosite/docs/tree/master/provision#post---upload-content Args: key: The CIK or Token for the device model: contentid: The ID used to name the entity bucket data: The data blob to save mimetype: The Content-Type to use when serving the blob later
Below is the the instruction that describes the task: ### Input: Store the given data as a result of a query for content id given the model. This method maps to https://github.com/exosite/docs/tree/master/provision#post---upload-content Args: key: The CIK or Token for the device model: contentid: The ID used to name the entity bucket data: The data blob to save mimetype: The Content-Type to use when serving the blob later ### Response: def content_upload(self, key, model, contentid, data, mimetype): """Store the given data as a result of a query for content id given the model. This method maps to https://github.com/exosite/docs/tree/master/provision#post---upload-content Args: key: The CIK or Token for the device model: contentid: The ID used to name the entity bucket data: The data blob to save mimetype: The Content-Type to use when serving the blob later """ headers = {"Content-Type": mimetype} path = PROVISION_MANAGE_CONTENT + model + '/' + contentid return self._request(path, key, data, 'POST', self._manage_by_cik, headers)
def create_pids(cls, record_uuid, pids):
    """Create persistent identifiers."""
    for p in pids:
        PersistentIdentifier.create(
            pid_type=p.pid_type,
            pid_value=p.pid_value,
            pid_provider=p.provider.pid_provider if p.provider else None,
            object_type='rec',
            object_uuid=record_uuid,
            status=PIDStatus.REGISTERED,
        )
    db.session.commit()
Create persistent identifiers.
Below is the the instruction that describes the task: ### Input: Create persistent identifiers. ### Response: def create_pids(cls, record_uuid, pids): """Create persistent identifiers.""" for p in pids: PersistentIdentifier.create( pid_type=p.pid_type, pid_value=p.pid_value, pid_provider=p.provider.pid_provider if p.provider else None, object_type='rec', object_uuid=record_uuid, status=PIDStatus.REGISTERED, ) db.session.commit()
def create(self, source_type, name, resources, auth=None, parameters=None, validate=True):
    """ Create a managed source

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate

        :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
        :type source_type: str
        :param name: name to use to identify the managed source being created
        :type name: str
        :param resources: list of source-specific config dicts
        :type resources: list
        :param auth: list of source-specific authentication dicts
        :type auth: list
        :param parameters: (optional) dict with config information on how to treat each resource
        :type parameters: dict
        :param validate: bool to determine if validation should be performed on the source
        :type validate: bool
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    params = {
        'source_type': source_type,
        'name': name,
        'resources': resources,
        'validate': validate
    }
    if auth:
        params['auth'] = auth
    if parameters:
        params['parameters'] = parameters
    return self.request.post('create', params)
Create a managed source

Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate

:param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
:type source_type: str
:param name: name to use to identify the managed source being created
:type name: str
:param resources: list of source-specific config dicts
:type resources: list
:param auth: list of source-specific authentication dicts
:type auth: list
:param parameters: (optional) dict with config information on how to treat each resource
:type parameters: dict
:param validate: bool to determine if validation should be performed on the source
:type validate: bool
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
Below is the instruction that describes the task:
### Input:
Create a managed source

Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate

:param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
:type source_type: str
:param name: name to use to identify the managed source being created
:type name: str
:param resources: list of source-specific config dicts
:type resources: list
:param auth: list of source-specific authentication dicts
:type auth: list
:param parameters: (optional) dict with config information on how to treat each resource
:type parameters: dict
:param validate: bool to determine if validation should be performed on the source
:type validate: bool
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
### Response:
def create(self, source_type, name, resources, auth=None, parameters=None, validate=True):
    """ Create a managed source

        Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/sourcecreate

        :param source_type: data source name e.g. facebook_page, googleplus, instagram, yammer
        :type source_type: str
        :param name: name to use to identify the managed source being created
        :type name: str
        :param resources: list of source-specific config dicts
        :type resources: list
        :param auth: list of source-specific authentication dicts
        :type auth: list
        :param parameters: (optional) dict with config information on how to treat each resource
        :type parameters: dict
        :param validate: bool to determine if validation should be performed on the source
        :type validate: bool
        :return: dict of REST API output with headers attached
        :rtype: :class:`~datasift.request.DictResponse`
        :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    assert resources, "Need at least one resource"
    params = {
        'source_type': source_type,
        'name': name,
        'resources': resources,
        'validate': validate
    }
    if auth:
        params['auth'] = auth
    if parameters:
        params['parameters'] = parameters
    return self.request.post('create', params)
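A hedged sketch of creating a facebook_page managed source through the DataSift Python client; the credentials, the managed_sources attribute path, and the exact keys inside the resources/auth dicts are assumptions to adapt to the real account and source type.

from datasift import Client  # assumed import for the official DataSift client

client = Client("your_username", "your_api_key")  # placeholder credentials

# Resource and auth payloads are source-specific; the keys below are illustrative only.
resources = [{"parameters": {"url": "http://www.facebook.com/example",
                             "title": "Example page",
                             "id": "example"}}]
auth = [{"parameters": {"value": "FACEBOOK_OAUTH_TOKEN_PLACEHOLDER"}}]

source = client.managed_sources.create("facebook_page", "my example source",
                                       resources, auth=auth)
print(source)  # dict-like response with headers attached; keys depend on the API version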
def apply_region_configs(env_config):
    """Override default env configs with region specific configs and nest
    all values under a region

    Args:
        env_config (dict): The environment specific config.

    Return:
        dict: Newly updated dictionary with region overrides applied.
    """
    new_config = env_config.copy()
    for region in env_config.get('regions', REGIONS):
        if isinstance(env_config.get('regions'), dict):
            region_specific_config = env_config['regions'][region]
            new_config[region] = dict(DeepChainMap(region_specific_config, env_config))
        else:
            new_config[region] = env_config.copy()
    LOG.debug('Region Specific Config:\n%s', new_config)
    return new_config
Override default env configs with region specific configs and nest
all values under a region

Args:
    env_config (dict): The environment specific config.

Return:
    dict: Newly updated dictionary with region overrides applied.
Below is the instruction that describes the task:
### Input:
Override default env configs with region specific configs and nest
all values under a region

Args:
    env_config (dict): The environment specific config.

Return:
    dict: Newly updated dictionary with region overrides applied.
### Response:
def apply_region_configs(env_config):
    """Override default env configs with region specific configs and nest
    all values under a region

    Args:
        env_config (dict): The environment specific config.

    Return:
        dict: Newly updated dictionary with region overrides applied.
    """
    new_config = env_config.copy()
    for region in env_config.get('regions', REGIONS):
        if isinstance(env_config.get('regions'), dict):
            region_specific_config = env_config['regions'][region]
            new_config[region] = dict(DeepChainMap(region_specific_config, env_config))
        else:
            new_config[region] = env_config.copy()
    LOG.debug('Region Specific Config:\n%s', new_config)
    return new_config
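The sketch below illustrates the nesting behaviour of apply_region_configs on a small config; it assumes the function and its DeepChainMap/REGIONS helpers are importable from wherever this snippet lives, and the region names and instance_type values are made up.

# apply_region_configs (with its DeepChainMap/REGIONS helpers) is assumed to be in scope.
env_config = {
    "app": {"instance_type": "t2.micro"},
    "regions": {
        "us-east-1": {"app": {"instance_type": "m4.large"}},
        "us-west-2": {},
    },
}

merged = apply_region_configs(env_config)

# merged["us-east-1"]["app"]["instance_type"] -> "m4.large"  (region override wins)
# merged["us-west-2"]["app"]["instance_type"] -> "t2.micro"  (falls back to the default)
# The original top-level keys ("app", "regions") remain alongside the per-region
# entries because the function copies env_config before nesting.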