code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def as_iso8601(self): """ example: 2016-08-13T00:38:05.210+00:00 """ if self.__date is None or self.__time is None: return None return "20%s-%s-%sT%s:%s:%s0Z" % \ (self.__date[4:], self.__date[2:4], self.__date[:2], self.__time[:2], self.__time[2:4], self.__time[4:])
example: 2016-08-13T00:38:05.210+00:00
Below is the the instruction that describes the task: ### Input: example: 2016-08-13T00:38:05.210+00:00 ### Response: def as_iso8601(self): """ example: 2016-08-13T00:38:05.210+00:00 """ if self.__date is None or self.__time is None: return None return "20%s-%s-%sT%s:%s:%s0Z" % \ (self.__date[4:], self.__date[2:4], self.__date[:2], self.__time[:2], self.__time[2:4], self.__time[4:])
async def open_async(self): """ Starts the host. """ if not self.loop: self.loop = asyncio.get_event_loop() await self.partition_manager.start_async()
Starts the host.
Below is the the instruction that describes the task: ### Input: Starts the host. ### Response: async def open_async(self): """ Starts the host. """ if not self.loop: self.loop = asyncio.get_event_loop() await self.partition_manager.start_async()
def future_datetime(self, end_date='+30d', tzinfo=None): """ Get a DateTime object based on a random date between 1 second form now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ return self.date_time_between( start_date='+1s', end_date=end_date, tzinfo=tzinfo, )
Get a DateTime object based on a random date between 1 second form now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime
Below is the the instruction that describes the task: ### Input: Get a DateTime object based on a random date between 1 second form now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime ### Response: def future_datetime(self, end_date='+30d', tzinfo=None): """ Get a DateTime object based on a random date between 1 second form now and a given date. Accepts date strings that can be recognized by strtotime(). :param end_date Defaults to "+30d" :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('1999-02-02 11:42:52') :return DateTime """ return self.date_time_between( start_date='+1s', end_date=end_date, tzinfo=tzinfo, )
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align="start", trim_seq_len=None): """ Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection. """ y = dt[response] seq = dt[sequence] if trim_seq_len is not None: seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len) seq = [s.replace("N", "") for s in seq] dt_kmer = kmer_count(seq, k) Xsp = csc_matrix(dt_kmer) en = ElasticNet(alpha=1, standardize=False, n_splits=3) en.fit(Xsp, y) # which coefficients are nonzero?= nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist() # perform stepwise selection # # TODO - how do we deal with the intercept? # largest number of motifs where they don't differ by more than 1 k-mer def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True): """ perform stepwise model selection while preventing to add a motif similar to the already selected motifs. 
""" F, pval = f_regression(dt_kmer[to_be_selected_kmers], y) kmer = to_be_selected_kmers.pop(pval.argmin()) selected_kmers.append(kmer) def select_criterion(s1, s2, consider_shift=True): if hamming_distance(s1, s2) <= 1: return False if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0: return False if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0: return False return True to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)] if len(to_be_selected_kmers) == 0: return selected_kmers else: # regress out the new feature lm = LinearRegression() lm.fit(dt_kmer[selected_kmers], y) y_new = y - lm.predict(dt_kmer[selected_kmers]) return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift) selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift) return selected_kmers
Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection.
Below is the the instruction that describes the task: ### Input: Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection. ### Response: def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align="start", trim_seq_len=None): """ Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the reponse variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. 
If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occuring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection. """ y = dt[response] seq = dt[sequence] if trim_seq_len is not None: seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len) seq = [s.replace("N", "") for s in seq] dt_kmer = kmer_count(seq, k) Xsp = csc_matrix(dt_kmer) en = ElasticNet(alpha=1, standardize=False, n_splits=3) en.fit(Xsp, y) # which coefficients are nonzero?= nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist() # perform stepwise selection # # TODO - how do we deal with the intercept? # largest number of motifs where they don't differ by more than 1 k-mer def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True): """ perform stepwise model selection while preventing to add a motif similar to the already selected motifs. 
""" F, pval = f_regression(dt_kmer[to_be_selected_kmers], y) kmer = to_be_selected_kmers.pop(pval.argmin()) selected_kmers.append(kmer) def select_criterion(s1, s2, consider_shift=True): if hamming_distance(s1, s2) <= 1: return False if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0: return False if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0: return False return True to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)] if len(to_be_selected_kmers) == 0: return selected_kmers else: # regress out the new feature lm = LinearRegression() lm.fit(dt_kmer[selected_kmers], y) y_new = y - lm.predict(dt_kmer[selected_kmers]) return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift) selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift) return selected_kmers
def build(capabilities, device_handler): "Given a list of capability URI's returns <hello> message XML string" if device_handler: # This is used as kwargs dictionary for lxml's Element() function. # Therefore the arg-name ("nsmap") is used as key here. xml_namespace_kwargs = { "nsmap" : device_handler.get_xml_base_namespace_dict() } else: xml_namespace_kwargs = {} hello = new_ele("hello", **xml_namespace_kwargs) caps = sub_ele(hello, "capabilities") def fun(uri): sub_ele(caps, "capability").text = uri #python3 changes if sys.version < '3': map(fun, capabilities) else: list(map(fun, capabilities)) return to_xml(hello)
Given a list of capability URI's returns <hello> message XML string
Below is the the instruction that describes the task: ### Input: Given a list of capability URI's returns <hello> message XML string ### Response: def build(capabilities, device_handler): "Given a list of capability URI's returns <hello> message XML string" if device_handler: # This is used as kwargs dictionary for lxml's Element() function. # Therefore the arg-name ("nsmap") is used as key here. xml_namespace_kwargs = { "nsmap" : device_handler.get_xml_base_namespace_dict() } else: xml_namespace_kwargs = {} hello = new_ele("hello", **xml_namespace_kwargs) caps = sub_ele(hello, "capabilities") def fun(uri): sub_ele(caps, "capability").text = uri #python3 changes if sys.version < '3': map(fun, capabilities) else: list(map(fun, capabilities)) return to_xml(hello)
def set_logging_level(cl_args): """simply set verbose level based on command-line args :param cl_args: CLI arguments :type cl_args: dict :return: None :rtype: None """ if 'verbose' in cl_args and cl_args['verbose']: configure(logging.DEBUG) else: configure(logging.INFO)
simply set verbose level based on command-line args :param cl_args: CLI arguments :type cl_args: dict :return: None :rtype: None
Below is the the instruction that describes the task: ### Input: simply set verbose level based on command-line args :param cl_args: CLI arguments :type cl_args: dict :return: None :rtype: None ### Response: def set_logging_level(cl_args): """simply set verbose level based on command-line args :param cl_args: CLI arguments :type cl_args: dict :return: None :rtype: None """ if 'verbose' in cl_args and cl_args['verbose']: configure(logging.DEBUG) else: configure(logging.INFO)
def argval(self): """ Returns the value of the arg (if any) or None. If the arg. is not an integer, an error be triggered. """ if self.arg is None or any(x is None for x in self.arg): return None for x in self.arg: if not isinstance(x, int): raise InvalidArgError(self.arg) return self.arg
Returns the value of the arg (if any) or None. If the arg. is not an integer, an error be triggered.
Below is the the instruction that describes the task: ### Input: Returns the value of the arg (if any) or None. If the arg. is not an integer, an error be triggered. ### Response: def argval(self): """ Returns the value of the arg (if any) or None. If the arg. is not an integer, an error be triggered. """ if self.arg is None or any(x is None for x in self.arg): return None for x in self.arg: if not isinstance(x, int): raise InvalidArgError(self.arg) return self.arg
def urlstate(self, encryption_key): """ Will return a url safe representation of the state. :type encryption_key: Key used for encryption. :rtype: str :return: Url representation av of the state. """ lzma = LZMACompressor() urlstate_data = json.dumps(self._state_dict) urlstate_data = lzma.compress(urlstate_data.encode("UTF-8")) urlstate_data += lzma.flush() urlstate_data = _AESCipher(encryption_key).encrypt(urlstate_data) lzma = LZMACompressor() urlstate_data = lzma.compress(urlstate_data) urlstate_data += lzma.flush() urlstate_data = base64.urlsafe_b64encode(urlstate_data) return urlstate_data.decode("utf-8")
Will return a url safe representation of the state. :type encryption_key: Key used for encryption. :rtype: str :return: Url representation av of the state.
Below is the the instruction that describes the task: ### Input: Will return a url safe representation of the state. :type encryption_key: Key used for encryption. :rtype: str :return: Url representation av of the state. ### Response: def urlstate(self, encryption_key): """ Will return a url safe representation of the state. :type encryption_key: Key used for encryption. :rtype: str :return: Url representation av of the state. """ lzma = LZMACompressor() urlstate_data = json.dumps(self._state_dict) urlstate_data = lzma.compress(urlstate_data.encode("UTF-8")) urlstate_data += lzma.flush() urlstate_data = _AESCipher(encryption_key).encrypt(urlstate_data) lzma = LZMACompressor() urlstate_data = lzma.compress(urlstate_data) urlstate_data += lzma.flush() urlstate_data = base64.urlsafe_b64encode(urlstate_data) return urlstate_data.decode("utf-8")
def send_status(self, payload): """Send the daemon status and the current queue for displaying.""" answer = {} data = [] # Get daemon status if self.paused: answer['status'] = 'paused' else: answer['status'] = 'running' # Add current queue or a message, that queue is empty if len(self.queue) > 0: data = deepcopy(self.queue.queue) # Remove stderr and stdout output for transfer # Some outputs are way to big for the socket buffer # and this is not needed by the client for key, item in data.items(): if 'stderr' in item: del item['stderr'] if 'stdout' in item: del item['stdout'] else: data = 'Queue is empty' answer['data'] = data return answer
Send the daemon status and the current queue for displaying.
Below is the the instruction that describes the task: ### Input: Send the daemon status and the current queue for displaying. ### Response: def send_status(self, payload): """Send the daemon status and the current queue for displaying.""" answer = {} data = [] # Get daemon status if self.paused: answer['status'] = 'paused' else: answer['status'] = 'running' # Add current queue or a message, that queue is empty if len(self.queue) > 0: data = deepcopy(self.queue.queue) # Remove stderr and stdout output for transfer # Some outputs are way to big for the socket buffer # and this is not needed by the client for key, item in data.items(): if 'stderr' in item: del item['stderr'] if 'stdout' in item: del item['stdout'] else: data = 'Queue is empty' answer['data'] = data return answer
def get_full_name_or_username(self): """ Returns the full name of the user, or if none is supplied will return the username. Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it should return the username or email address when the full name is not supplied. :return: ``String`` containing the full name of the user. If no name is supplied it will return the username or email address depending on the ``ACCOUNTS_WITHOUT_USERNAMES`` setting. """ if self.first_name or self.last_name: # We will return this as translated string. Maybe there are some # countries that first display the last name. name = _(u"%(first_name)s %(last_name)s") % \ {'first_name': self.first_name, 'last_name': self.last_name} else: # Fallback to the username if usernames are used if not defaults.ACCOUNTS_WITHOUT_USERNAMES: name = "%(username)s" % {'username': self.username} else: name = "%(email)s" % {'email': self.email} return name.strip()
Returns the full name of the user, or if none is supplied will return the username. Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it should return the username or email address when the full name is not supplied. :return: ``String`` containing the full name of the user. If no name is supplied it will return the username or email address depending on the ``ACCOUNTS_WITHOUT_USERNAMES`` setting.
Below is the the instruction that describes the task: ### Input: Returns the full name of the user, or if none is supplied will return the username. Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it should return the username or email address when the full name is not supplied. :return: ``String`` containing the full name of the user. If no name is supplied it will return the username or email address depending on the ``ACCOUNTS_WITHOUT_USERNAMES`` setting. ### Response: def get_full_name_or_username(self): """ Returns the full name of the user, or if none is supplied will return the username. Also looks at ``ACCOUNTS_WITHOUT_USERNAMES`` settings to define if it should return the username or email address when the full name is not supplied. :return: ``String`` containing the full name of the user. If no name is supplied it will return the username or email address depending on the ``ACCOUNTS_WITHOUT_USERNAMES`` setting. """ if self.first_name or self.last_name: # We will return this as translated string. Maybe there are some # countries that first display the last name. name = _(u"%(first_name)s %(last_name)s") % \ {'first_name': self.first_name, 'last_name': self.last_name} else: # Fallback to the username if usernames are used if not defaults.ACCOUNTS_WITHOUT_USERNAMES: name = "%(username)s" % {'username': self.username} else: name = "%(email)s" % {'email': self.email} return name.strip()
def set_pkg_desc(self, doc, text): """Set's the package's description. Raises SPDXValueError if text is not free form text. Raises CardinalityError if description already set. Raises OrderError if no package previously defined. """ self.assert_package_exists() if not self.package_desc_set: self.package_desc_set = True if validations.validate_pkg_desc(text): doc.package.description = str_from_text(text) else: raise SPDXValueError('Package::Description') else: raise CardinalityError('Package::Description')
Set's the package's description. Raises SPDXValueError if text is not free form text. Raises CardinalityError if description already set. Raises OrderError if no package previously defined.
Below is the the instruction that describes the task: ### Input: Set's the package's description. Raises SPDXValueError if text is not free form text. Raises CardinalityError if description already set. Raises OrderError if no package previously defined. ### Response: def set_pkg_desc(self, doc, text): """Set's the package's description. Raises SPDXValueError if text is not free form text. Raises CardinalityError if description already set. Raises OrderError if no package previously defined. """ self.assert_package_exists() if not self.package_desc_set: self.package_desc_set = True if validations.validate_pkg_desc(text): doc.package.description = str_from_text(text) else: raise SPDXValueError('Package::Description') else: raise CardinalityError('Package::Description')
def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. 
kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. 
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. 
VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret
Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. 
disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0
Below is the the instruction that describes the task: ### Input: Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). 
block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. 
network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ### Response: def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. 
placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. 
An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. 
EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret
def _check_cats(cats, vtypes, df, prep, callers): """Only include categories in the final output if they have values. """ out = [] for cat in cats: all_vals = [] for vtype in vtypes: vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) all_vals.extend(vals) if sum(all_vals) / float(len(all_vals)) > 2: out.append(cat) if len(out) == 0: return cats else: return out
Only include categories in the final output if they have values.
Below is the the instruction that describes the task: ### Input: Only include categories in the final output if they have values. ### Response: def _check_cats(cats, vtypes, df, prep, callers): """Only include categories in the final output if they have values. """ out = [] for cat in cats: all_vals = [] for vtype in vtypes: vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) all_vals.extend(vals) if sum(all_vals) / float(len(all_vals)) > 2: out.append(cat) if len(out) == 0: return cats else: return out
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_wwn(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_wwn = ET.SubElement(fcoe_intf_list, "fcoe-intf-wwn") fcoe_intf_wwn.text = kwargs.pop('fcoe_intf_wwn') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_wwn(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id') fcoe_intf_wwn = ET.SubElement(fcoe_intf_list, "fcoe-intf-wwn") fcoe_intf_wwn.text = kwargs.pop('fcoe_intf_wwn') callback = kwargs.pop('callback', self._callback) return callback(config)
def wasp_snp_directory(vcf, directory, sample_name=None): """ Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are used. Parameters: ----------- vcf : str Path to VCF file. directory : str Output directory. This is the directory that will hold the files for WASP. sample_name : str If provided, use this sample name to get heterozygous SNPs from VCF file. """ chrom = [] pos = [] ref = [] alt = [] vcf_reader = pyvcf.Reader(open(vcf, 'r')) if sample_name: def condition(record, sample_name): return sample_name in [x.sample for x in record.get_hets()] else: def condition(record, sample_name): return len(record.get_hets()) > 0 for record in vcf_reader: if condition(record, sample_name): if len(record.ALT) == 1: chrom.append(record.CHROM) pos.append(record.POS) ref.append(record.REF) alt.append(record.ALT[0].sequence) df = pd.DataFrame([chrom, pos, ref, alt], index=['chrom', 'position', 'RefAllele', 'AltAllele']).T if not os.path.exists(directory): os.makedirs(directory) for c in set(df.chrom): tdf = df[df.chrom == c] if tdf.shape[0] > 0: f = gzip.open(os.path.join(directory, '{}.snps.txt.gz'.format(c)), 'wb') lines = (tdf.position.astype(str) + '\t' + tdf.RefAllele + '\t' + tdf.AltAllele) f.write('\n'.join(lines) + '\n') f.close()
Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are used. Parameters: ----------- vcf : str Path to VCF file. directory : str Output directory. This is the directory that will hold the files for WASP. sample_name : str If provided, use this sample name to get heterozygous SNPs from VCF file.
Below is the the instruction that describes the task: ### Input: Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are used. Parameters: ----------- vcf : str Path to VCF file. directory : str Output directory. This is the directory that will hold the files for WASP. sample_name : str If provided, use this sample name to get heterozygous SNPs from VCF file. ### Response: def wasp_snp_directory(vcf, directory, sample_name=None): """ Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are used. Parameters: ----------- vcf : str Path to VCF file. directory : str Output directory. This is the directory that will hold the files for WASP. sample_name : str If provided, use this sample name to get heterozygous SNPs from VCF file. """ chrom = [] pos = [] ref = [] alt = [] vcf_reader = pyvcf.Reader(open(vcf, 'r')) if sample_name: def condition(record, sample_name): return sample_name in [x.sample for x in record.get_hets()] else: def condition(record, sample_name): return len(record.get_hets()) > 0 for record in vcf_reader: if condition(record, sample_name): if len(record.ALT) == 1: chrom.append(record.CHROM) pos.append(record.POS) ref.append(record.REF) alt.append(record.ALT[0].sequence) df = pd.DataFrame([chrom, pos, ref, alt], index=['chrom', 'position', 'RefAllele', 'AltAllele']).T if not os.path.exists(directory): os.makedirs(directory) for c in set(df.chrom): tdf = df[df.chrom == c] if tdf.shape[0] > 0: f = gzip.open(os.path.join(directory, '{}.snps.txt.gz'.format(c)), 'wb') lines = (tdf.position.astype(str) + '\t' + tdf.RefAllele + '\t' + tdf.AltAllele) f.write('\n'.join(lines) + '\n') f.close()
def rtm( # type: ignore self, url: Optional[str] = None, bot_id: Optional[str] = None ) -> Iterator[events.Event]: """ Iterate over event from the RTM API Args: url: Websocket connection url bot_id: Connecting bot ID Returns: :class:`slack.events.Event` or :class:`slack.events.Message` """ while True: bot_id = bot_id or self._find_bot_id() url = url or self._find_rtm_url() for event in self._incoming_from_rtm(url, bot_id): yield event url = None
Iterate over event from the RTM API Args: url: Websocket connection url bot_id: Connecting bot ID Returns: :class:`slack.events.Event` or :class:`slack.events.Message`
Below is the the instruction that describes the task: ### Input: Iterate over event from the RTM API Args: url: Websocket connection url bot_id: Connecting bot ID Returns: :class:`slack.events.Event` or :class:`slack.events.Message` ### Response: def rtm( # type: ignore self, url: Optional[str] = None, bot_id: Optional[str] = None ) -> Iterator[events.Event]: """ Iterate over event from the RTM API Args: url: Websocket connection url bot_id: Connecting bot ID Returns: :class:`slack.events.Event` or :class:`slack.events.Message` """ while True: bot_id = bot_id or self._find_bot_id() url = url or self._find_rtm_url() for event in self._incoming_from_rtm(url, bot_id): yield event url = None
def close(self): """Close socket connection""" if self._writer: self._writer.transport.close() self._writer = None self._reader = None
Close socket connection
Below is the the instruction that describes the task: ### Input: Close socket connection ### Response: def close(self): """Close socket connection""" if self._writer: self._writer.transport.close() self._writer = None self._reader = None
def pivot_filter(pivot_array, predicates, ty=None): """ Returns a new array, with each element in the original array satisfying the passed-in predicate set to `new_value` Args: array (WeldObject / Numpy.ndarray): Input array predicates (WeldObject / Numpy.ndarray<bool>): Predicate set ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation """ weld_obj = WeldObject(encoder_, decoder_) pivot_array_var = weld_obj.update(pivot_array) if isinstance(pivot_array, WeldObject): pivot_array_var = pivot_array.obj_id weld_obj.dependencies[pivot_array_var] = pivot_array predicates_var = weld_obj.update(predicates) if isinstance(predicates, WeldObject): predicates_var = predicates.obj_id weld_obj.dependencies[predicates_var] = predicates weld_template = """ let index_filtered = result( for( zip(%(array)s.$0, %(predicates)s), appender, |b, i, e| if (e.$1, merge(b, e.$0), b) ) ); let pivot_filtered = map( %(array)s.$1, |x| result( for( zip(x, %(predicates)s), appender, |b, i, e| if (e.$1, merge(b, e.$0), b) ) ) ); {index_filtered, pivot_filtered, %(array)s.$2} """ weld_obj.weld_code = weld_template % { "array": pivot_array_var, "predicates": predicates_var} return weld_obj
Returns a new array, with each element in the original array satisfying the passed-in predicate set to `new_value` Args: array (WeldObject / Numpy.ndarray): Input array predicates (WeldObject / Numpy.ndarray<bool>): Predicate set ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
Below is the the instruction that describes the task: ### Input: Returns a new array, with each element in the original array satisfying the passed-in predicate set to `new_value` Args: array (WeldObject / Numpy.ndarray): Input array predicates (WeldObject / Numpy.ndarray<bool>): Predicate set ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation ### Response: def pivot_filter(pivot_array, predicates, ty=None): """ Returns a new array, with each element in the original array satisfying the passed-in predicate set to `new_value` Args: array (WeldObject / Numpy.ndarray): Input array predicates (WeldObject / Numpy.ndarray<bool>): Predicate set ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation """ weld_obj = WeldObject(encoder_, decoder_) pivot_array_var = weld_obj.update(pivot_array) if isinstance(pivot_array, WeldObject): pivot_array_var = pivot_array.obj_id weld_obj.dependencies[pivot_array_var] = pivot_array predicates_var = weld_obj.update(predicates) if isinstance(predicates, WeldObject): predicates_var = predicates.obj_id weld_obj.dependencies[predicates_var] = predicates weld_template = """ let index_filtered = result( for( zip(%(array)s.$0, %(predicates)s), appender, |b, i, e| if (e.$1, merge(b, e.$0), b) ) ); let pivot_filtered = map( %(array)s.$1, |x| result( for( zip(x, %(predicates)s), appender, |b, i, e| if (e.$1, merge(b, e.$0), b) ) ) ); {index_filtered, pivot_filtered, %(array)s.$2} """ weld_obj.weld_code = weld_template % { "array": pivot_array_var, "predicates": predicates_var} return weld_obj
def sprint(string, *args, **kwargs): """Safe Print (handle UnicodeEncodeErrors on some terminals)""" try: print(string, *args, **kwargs) except UnicodeEncodeError: string = string.encode('utf-8', errors='ignore')\ .decode('ascii', errors='ignore') print(string, *args, **kwargs)
Safe Print (handle UnicodeEncodeErrors on some terminals)
Below is the the instruction that describes the task: ### Input: Safe Print (handle UnicodeEncodeErrors on some terminals) ### Response: def sprint(string, *args, **kwargs): """Safe Print (handle UnicodeEncodeErrors on some terminals)""" try: print(string, *args, **kwargs) except UnicodeEncodeError: string = string.encode('utf-8', errors='ignore')\ .decode('ascii', errors='ignore') print(string, *args, **kwargs)
def get_image_label(name, default="not_found.png"): """Return image inside a QLabel object""" label = QLabel() label.setPixmap(QPixmap(get_image_path(name, default))) return label
Return image inside a QLabel object
Below is the the instruction that describes the task: ### Input: Return image inside a QLabel object ### Response: def get_image_label(name, default="not_found.png"): """Return image inside a QLabel object""" label = QLabel() label.setPixmap(QPixmap(get_image_path(name, default))) return label
def GetZoneGroupState(self, *args, **kwargs): """Overrides default handling to use the global shared zone group state cache, unless another cache is specified.""" kwargs['cache'] = kwargs.get('cache', zone_group_state_shared_cache) return self.send_command('GetZoneGroupState', *args, **kwargs)
Overrides default handling to use the global shared zone group state cache, unless another cache is specified.
Below is the the instruction that describes the task: ### Input: Overrides default handling to use the global shared zone group state cache, unless another cache is specified. ### Response: def GetZoneGroupState(self, *args, **kwargs): """Overrides default handling to use the global shared zone group state cache, unless another cache is specified.""" kwargs['cache'] = kwargs.get('cache', zone_group_state_shared_cache) return self.send_command('GetZoneGroupState', *args, **kwargs)
def process_equations(key, value, fmt, meta): """Processes the attributed equations.""" if key == 'Math' and len(value) == 3: # Process the equation eq = _process_equation(value, fmt) # Get the attributes and label attrs = eq['attrs'] label = attrs[0] if eq['is_unreferenceable']: attrs[0] = '' # The label isn't needed outside this function # Context-dependent output if eq['is_unnumbered']: # Unnumbered is also unreferenceable return None elif fmt in ['latex', 'beamer']: return RawInline('tex', r'\begin{equation}%s\end{equation}'%value[-1]) elif fmt in ('html', 'html5') and LABEL_PATTERN.match(label): # Present equation and its number in a span text = str(references[label]) outerspan = RawInline('html', '<span %s style="display: inline-block; ' 'position: relative; width: 100%%">'%(''\ if eq['is_unreferenceable'] \ else 'id="%s"'%label)) innerspan = RawInline('html', '<span style="position: absolute; ' 'right: 0em; top: %s; line-height:0; ' 'text-align: right">' % ('0' if text.startswith('$') and text.endswith('$') else '50%',)) num = Math({"t":"InlineMath"}, '(%s)' % text[1:-1]) \ if text.startswith('$') and text.endswith('$') \ else Str('(%s)' % text) endspans = RawInline('html', '</span></span>') return [outerspan, AttrMath(*value), innerspan, num, endspans] elif fmt == 'docx': # As per http://officeopenxml.com/WPhyperlink.php bookmarkstart = \ RawInline('openxml', '<w:bookmarkStart w:id="0" w:name="%s"/><w:r><w:t>' %label) bookmarkend = \ RawInline('openxml', '</w:t></w:r><w:bookmarkEnd w:id="0"/>') return [bookmarkstart, AttrMath(*value), bookmarkend] return None
Processes the attributed equations.
Below is the the instruction that describes the task: ### Input: Processes the attributed equations. ### Response: def process_equations(key, value, fmt, meta): """Processes the attributed equations.""" if key == 'Math' and len(value) == 3: # Process the equation eq = _process_equation(value, fmt) # Get the attributes and label attrs = eq['attrs'] label = attrs[0] if eq['is_unreferenceable']: attrs[0] = '' # The label isn't needed outside this function # Context-dependent output if eq['is_unnumbered']: # Unnumbered is also unreferenceable return None elif fmt in ['latex', 'beamer']: return RawInline('tex', r'\begin{equation}%s\end{equation}'%value[-1]) elif fmt in ('html', 'html5') and LABEL_PATTERN.match(label): # Present equation and its number in a span text = str(references[label]) outerspan = RawInline('html', '<span %s style="display: inline-block; ' 'position: relative; width: 100%%">'%(''\ if eq['is_unreferenceable'] \ else 'id="%s"'%label)) innerspan = RawInline('html', '<span style="position: absolute; ' 'right: 0em; top: %s; line-height:0; ' 'text-align: right">' % ('0' if text.startswith('$') and text.endswith('$') else '50%',)) num = Math({"t":"InlineMath"}, '(%s)' % text[1:-1]) \ if text.startswith('$') and text.endswith('$') \ else Str('(%s)' % text) endspans = RawInline('html', '</span></span>') return [outerspan, AttrMath(*value), innerspan, num, endspans] elif fmt == 'docx': # As per http://officeopenxml.com/WPhyperlink.php bookmarkstart = \ RawInline('openxml', '<w:bookmarkStart w:id="0" w:name="%s"/><w:r><w:t>' %label) bookmarkend = \ RawInline('openxml', '</w:t></w:r><w:bookmarkEnd w:id="0"/>') return [bookmarkstart, AttrMath(*value), bookmarkend] return None
def _parse_assembly(self, assembly_file): """Parse an assembly file in fasta format. This is a Fasta parsing method that populates the :py:attr:`Assembly.contigs` attribute with data for each contig in the assembly. Parameters ---------- assembly_file : str Path to the assembly fasta file. """ with open(assembly_file) as fh: header = None logger.debug("Starting iteration of assembly file: {}".format( assembly_file)) for line in fh: # Skip empty lines if not line.strip(): continue if line.startswith(">"): # Add contig header to contig dictionary header = line[1:].strip() self.contigs[header] = [] else: # Add sequence string for the current contig self.contigs[header].append(line.strip()) # After populating the contigs dictionary, convert the values # list into a string sequence self.contigs = OrderedDict( (header, "".join(seq)) for header, seq in self.contigs.items())
Parse an assembly file in fasta format. This is a Fasta parsing method that populates the :py:attr:`Assembly.contigs` attribute with data for each contig in the assembly. Parameters ---------- assembly_file : str Path to the assembly fasta file.
Below is the the instruction that describes the task: ### Input: Parse an assembly file in fasta format. This is a Fasta parsing method that populates the :py:attr:`Assembly.contigs` attribute with data for each contig in the assembly. Parameters ---------- assembly_file : str Path to the assembly fasta file. ### Response: def _parse_assembly(self, assembly_file): """Parse an assembly file in fasta format. This is a Fasta parsing method that populates the :py:attr:`Assembly.contigs` attribute with data for each contig in the assembly. Parameters ---------- assembly_file : str Path to the assembly fasta file. """ with open(assembly_file) as fh: header = None logger.debug("Starting iteration of assembly file: {}".format( assembly_file)) for line in fh: # Skip empty lines if not line.strip(): continue if line.startswith(">"): # Add contig header to contig dictionary header = line[1:].strip() self.contigs[header] = [] else: # Add sequence string for the current contig self.contigs[header].append(line.strip()) # After populating the contigs dictionary, convert the values # list into a string sequence self.contigs = OrderedDict( (header, "".join(seq)) for header, seq in self.contigs.items())
def get_class_name(self): """ Return the class name of the field :rtype: string """ if self.class_idx_value is None: self.class_idx_value = self.CM.get_type(self.class_idx) return self.class_idx_value
Return the class name of the field :rtype: string
Below is the the instruction that describes the task: ### Input: Return the class name of the field :rtype: string ### Response: def get_class_name(self): """ Return the class name of the field :rtype: string """ if self.class_idx_value is None: self.class_idx_value = self.CM.get_type(self.class_idx) return self.class_idx_value
def _get_fs(thin_pathname): """ Returns the file system type (xfs, ext4) of a given device """ cmd = ['lsblk', '-o', 'FSTYPE', '-n', thin_pathname] fs_return = util.subp(cmd) return fs_return.stdout.strip()
Returns the file system type (xfs, ext4) of a given device
Below is the the instruction that describes the task: ### Input: Returns the file system type (xfs, ext4) of a given device ### Response: def _get_fs(thin_pathname): """ Returns the file system type (xfs, ext4) of a given device """ cmd = ['lsblk', '-o', 'FSTYPE', '-n', thin_pathname] fs_return = util.subp(cmd) return fs_return.stdout.strip()
def apply_parallel(func: Callable, data: List[Any], cpu_cores: int = None) -> List[Any]: """ Apply function to list of elements. Automatically determines the chunk size. """ if not cpu_cores: cpu_cores = cpu_count() try: chunk_size = ceil(len(data) / cpu_cores) pool = Pool(cpu_cores) transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1) finally: pool.close() pool.join() return transformed_data
Apply function to list of elements. Automatically determines the chunk size.
Below is the the instruction that describes the task: ### Input: Apply function to list of elements. Automatically determines the chunk size. ### Response: def apply_parallel(func: Callable, data: List[Any], cpu_cores: int = None) -> List[Any]: """ Apply function to list of elements. Automatically determines the chunk size. """ if not cpu_cores: cpu_cores = cpu_count() try: chunk_size = ceil(len(data) / cpu_cores) pool = Pool(cpu_cores) transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1) finally: pool.close() pool.join() return transformed_data
def clean(all=False, docs=False, dist=False, extra=None): """Clean up build files""" run('find . -type f -name "*.py[co]" -delete') run('find . -type d -name "__pycache__" -delete') patterns = ['build', '*.egg-info/'] if all or docs: patterns.append('doc/build/*') if all or dist: patterns.append('dist') if extra: patterns.append(extra) for pattern in patterns: run('rm -rf {}'.format(pattern))
Clean up build files
Below is the the instruction that describes the task: ### Input: Clean up build files ### Response: def clean(all=False, docs=False, dist=False, extra=None): """Clean up build files""" run('find . -type f -name "*.py[co]" -delete') run('find . -type d -name "__pycache__" -delete') patterns = ['build', '*.egg-info/'] if all or docs: patterns.append('doc/build/*') if all or dist: patterns.append('dist') if extra: patterns.append(extra) for pattern in patterns: run('rm -rf {}'.format(pattern))
def _add_qualified_edge(self, u, v, relation, annotations, subject_modifier, object_modifier) -> str: """Add an edge, then adds the opposite direction edge if it should.""" sha512 = self._add_qualified_edge_helper( u, v, relation=relation, annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, ) if relation in TWO_WAY_RELATIONS: self._add_qualified_edge_helper( v, u, relation=relation, annotations=annotations, object_modifier=subject_modifier, subject_modifier=object_modifier, ) return sha512
Add an edge, then adds the opposite direction edge if it should.
Below is the the instruction that describes the task: ### Input: Add an edge, then adds the opposite direction edge if it should. ### Response: def _add_qualified_edge(self, u, v, relation, annotations, subject_modifier, object_modifier) -> str: """Add an edge, then adds the opposite direction edge if it should.""" sha512 = self._add_qualified_edge_helper( u, v, relation=relation, annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, ) if relation in TWO_WAY_RELATIONS: self._add_qualified_edge_helper( v, u, relation=relation, annotations=annotations, object_modifier=subject_modifier, subject_modifier=object_modifier, ) return sha512
def delete_relation(sender, instance, **kwargs): """Delete the Relation object when the last Entity is removed.""" def process_signal(relation_id): """Get the relation and delete it if it has no entities left.""" try: relation = Relation.objects.get(pk=relation_id) except Relation.DoesNotExist: return if relation.entities.count() == 0: relation.delete() # Wait for partitions to be recreated. transaction.on_commit(lambda: process_signal(instance.relation_id))
Delete the Relation object when the last Entity is removed.
Below is the the instruction that describes the task: ### Input: Delete the Relation object when the last Entity is removed. ### Response: def delete_relation(sender, instance, **kwargs): """Delete the Relation object when the last Entity is removed.""" def process_signal(relation_id): """Get the relation and delete it if it has no entities left.""" try: relation = Relation.objects.get(pk=relation_id) except Relation.DoesNotExist: return if relation.entities.count() == 0: relation.delete() # Wait for partitions to be recreated. transaction.on_commit(lambda: process_signal(instance.relation_id))
def delete_posix_account(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Deletes a POSIX account.

    Example:
        >>> from google.cloud import oslogin_v1
        >>>
        >>> client = oslogin_v1.OsLoginServiceClient()
        >>>
        >>> name = client.project_path('[USER]', '[PROJECT]')
        >>>
        >>> client.delete_posix_account(name)

    Args:
        name (str): A reference to the POSIX account to update, in format
            ``users/{user}/projects/{project}``.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    method_name = "delete_posix_account"
    # Lazily wrap the raw transport method with retry/timeout logic on
    # first use and cache the wrapped callable.
    if method_name not in self._inner_api_calls:
        self._inner_api_calls[method_name] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.delete_posix_account,
            default_retry=self._method_configs["DeletePosixAccount"].retry,
            default_timeout=self._method_configs["DeletePosixAccount"].timeout,
            client_info=self._client_info,
        )

    request = oslogin_pb2.DeletePosixAccountRequest(name=name)
    self._inner_api_calls[method_name](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Deletes a POSIX account. Example: >>> from google.cloud import oslogin_v1 >>> >>> client = oslogin_v1.OsLoginServiceClient() >>> >>> name = client.project_path('[USER]', '[PROJECT]') >>> >>> client.delete_posix_account(name) Args: name (str): A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format ``users/{user}/projects/{project}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
Below is the the instruction that describes the task: ### Input: Deletes a POSIX account. Example: >>> from google.cloud import oslogin_v1 >>> >>> client = oslogin_v1.OsLoginServiceClient() >>> >>> name = client.project_path('[USER]', '[PROJECT]') >>> >>> client.delete_posix_account(name) Args: name (str): A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format ``users/{user}/projects/{project}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. ### Response: def delete_posix_account( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a POSIX account. Example: >>> from google.cloud import oslogin_v1 >>> >>> client = oslogin_v1.OsLoginServiceClient() >>> >>> name = client.project_path('[USER]', '[PROJECT]') >>> >>> client.delete_posix_account(name) Args: name (str): A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format ``users/{user}/projects/{project}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_posix_account" not in self._inner_api_calls: self._inner_api_calls[ "delete_posix_account" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_posix_account, default_retry=self._method_configs["DeletePosixAccount"].retry, default_timeout=self._method_configs["DeletePosixAccount"].timeout, client_info=self._client_info, ) request = oslogin_pb2.DeletePosixAccountRequest(name=name) self._inner_api_calls["delete_posix_account"]( request, retry=retry, timeout=timeout, metadata=metadata )
def Mx(mt, x):
    """Return M_x, the tail sum of the commutation column C_j for ages j >= x.

    Args:
        mt: Commutation/mortality table exposing a ``Cx`` sequence.
        x (int): Age at which the tail sum starts.

    Returns:
        Sum of ``mt.Cx[x:]`` (0 when ``x`` is past the end of the table).
    """
    # Actuarially, M_x is defined as the sum of C_j for j from x to the
    # end of the table; the builtin sum over a slice replaces the manual
    # accumulation loop.
    return sum(mt.Cx[x:])
Return the Mx
Below is the the instruction that describes the task: ### Input: Return the Mx ### Response: def Mx(mt, x): """ Return the Mx """ n = len(mt.Cx) sum1 = 0 for j in range(x, n): k = mt.Cx[j] sum1 += k return sum1
def convert_relational(relational):
    """Convert all inequalities to >=0 form.

    Args:
        relational: A relational expression exposing ``rel_op``, ``lhs`` and
            ``rhs`` (e.g. a SymPy ``Relational``).

    Returns:
        An expression that is >= 0 (or == 0) exactly when the original
        relation holds.

    Raises:
        Exception: If ``rel_op`` is not one of ==, >=, >, <=, <.
    """
    rel = relational.rel_op
    if rel in ['==', '>=', '>']:
        return relational.lhs - relational.rhs
    elif rel in ['<=', '<']:
        return relational.rhs - relational.lhs
    else:
        # Bug fix: the operator was previously concatenated *inside* the
        # string literal ("... ' + rel + ' ..."), so the message never
        # showed the actual operator.
        raise Exception("The relational operation " + rel + " is not "
                        "implemented!")
Convert all inequalities to >=0 form.
Below is the the instruction that describes the task: ### Input: Convert all inequalities to >=0 form. ### Response: def convert_relational(relational): """Convert all inequalities to >=0 form. """ rel = relational.rel_op if rel in ['==', '>=', '>']: return relational.lhs-relational.rhs elif rel in ['<=', '<']: return relational.rhs-relational.lhs else: raise Exception("The relational operation ' + rel + ' is not " "implemented!")
def lookup(self, row_labels, col_labels):
    """
    Label-based "fancy indexing" function for DataFrame.

    Given equal-length arrays of row and column labels, return an
    array of the values corresponding to each (row, col) pair.

    Parameters
    ----------
    row_labels : sequence
        The row labels to use for lookup
    col_labels : sequence
        The column labels to use for lookup

    Returns
    -------
    values : ndarray
        The found values

    Raises
    ------
    ValueError
        If the two label sequences differ in length.
    KeyError
        If any row or column label is not found.

    Notes
    -----
    Akin to::

        result = [df.get_value(row, col)
                  for row, col in zip(row_labels, col_labels)]
    """
    n = len(row_labels)
    if n != len(col_labels):
        raise ValueError('Row labels must have same size as column labels')

    thresh = 1000
    if not self._is_mixed_type or n > thresh:
        # Fast path: single-dtype frame, or enough lookups that the cost
        # of materializing .values is amortized. Translate labels to
        # positional indexers and gather via flat indexing.
        values = self.values
        ridx = self.index.get_indexer(row_labels)
        cidx = self.columns.get_indexer(col_labels)
        # get_indexer marks missing labels with -1.
        if (ridx == -1).any():
            raise KeyError('One or more row labels was not found')
        if (cidx == -1).any():
            raise KeyError('One or more column labels was not found')
        # Row-major flat offset into the 2-D values array.
        flat_index = ridx * len(self.columns) + cidx
        result = values.flat[flat_index]
    else:
        # Slow path for small lookups on mixed-dtype frames: fetch each
        # cell individually to avoid upcasting the whole frame to object.
        result = np.empty(n, dtype='O')
        for i, (r, c) in enumerate(zip(row_labels, col_labels)):
            result[i] = self._get_value(r, c)

    if is_object_dtype(result):
        # Try to downcast object results back to a tighter dtype.
        result = lib.maybe_convert_objects(result)

    return result
Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Examples -------- values : ndarray The found values
Below is the the instruction that describes the task: ### Input: Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Examples -------- values : ndarray The found values ### Response: def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Examples -------- values : ndarray The found values """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result
async def query(self, stmt, *args):
    """Query for a list of results.

    Typical usage::

        results = await self.query(...)

    Or::

        for row in await self.query(...)
    """
    with (await self.application.db.cursor()) as cur:
        await cur.execute(stmt, args)
        rows = await cur.fetchall()
        # Convert each raw DB row into an object before the cursor closes.
        return [self.row_to_obj(row, cur) for row in rows]
Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...)
Below is the the instruction that describes the task: ### Input: Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...) ### Response: async def query(self, stmt, *args): """Query for a list of results. Typical usage:: results = await self.query(...) Or:: for row in await self.query(...) """ with (await self.application.db.cursor()) as cur: await cur.execute(stmt, args) return [self.row_to_obj(row, cur) for row in await cur.fetchall()]
def press(*keys):
    """Simulate a key-press for every key passed to the function.

    :param keys: list of keys to be pressed
    :return: None
    """
    for key_name in keys:
        # Emit the key-down event, then immediately release it.
        win32api.keybd_event(codes[key_name], 0, 0, 0)
        release(key_name)
Simulates a key-press for all the keys passed to the function :param keys: list of keys to be pressed :return: None
Below is the the instruction that describes the task: ### Input: Simulates a key-press for all the keys passed to the function :param keys: list of keys to be pressed :return: None ### Response: def press(*keys): """ Simulates a key-press for all the keys passed to the function :param keys: list of keys to be pressed :return: None """ for key in keys: win32api.keybd_event(codes[key], 0, 0, 0) release(key)
def _load_transition_models(self):
    """Create a model for every transition of the wrapped state."""
    self.transitions = []
    for state_transition in self.state.transitions.values():
        # _add_model appends a TransitionModel wrapper to self.transitions.
        self._add_model(self.transitions, state_transition, TransitionModel)
Adds models for each transition of the state
Below is the the instruction that describes the task: ### Input: Adds models for each transition of the state ### Response: def _load_transition_models(self): """ Adds models for each transition of the state """ self.transitions = [] for transition in self.state.transitions.values(): self._add_model(self.transitions, transition, TransitionModel)
def create(server, name, project, apikey, output, threshold, verbose):
    """Create a new run on an entity matching server.

    See entity matching service documentation for details on threshold.

    Returns details for the created run.
    """
    if verbose:
        log("Entity Matching Server: {}".format(server))

    if threshold is None:
        raise ValueError("Please provide a threshold")

    try:
        response = run_create(server, project, apikey, threshold, name)
    except ServiceError as exc:
        # Report the failure but do not re-raise; nothing is written.
        log("Unexpected response with status {}".format(exc.status_code))
        log(exc.text)
    else:
        # Only write run details on success.
        json.dump(response, output)
Create a new run on an entity matching server. See entity matching service documentation for details on threshold. Returns details for the created run.
Below is the the instruction that describes the task: ### Input: Create a new run on an entity matching server. See entity matching service documentation for details on threshold. Returns details for the created run. ### Response: def create(server, name, project, apikey, output, threshold, verbose): """Create a new run on an entity matching server. See entity matching service documentation for details on threshold. Returns details for the created run. """ if verbose: log("Entity Matching Server: {}".format(server)) if threshold is None: raise ValueError("Please provide a threshold") # Create a new run try: response = run_create(server, project, apikey, threshold, name) except ServiceError as e: log("Unexpected response with status {}".format(e.status_code)) log(e.text) else: json.dump(response, output)
def make_item_class_for_custom_generator_class(cls):
    """Create the item class associated with a custom generator class.

    cls: The custom generator class for which to create an item-class
    """
    item_cls_name = cls.__tohu_items_name__
    field_names = cls.field_gens.keys()
    return make_item_class(item_cls_name, field_names)
cls: The custom generator class for which to create an item-class
Below is the the instruction that describes the task: ### Input: cls: The custom generator class for which to create an item-class ### Response: def make_item_class_for_custom_generator_class(cls): """ cls: The custom generator class for which to create an item-class """ clsname = cls.__tohu_items_name__ attr_names = cls.field_gens.keys() return make_item_class(clsname, attr_names)
def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False):
    """
    Calculate the convolution F(t) = int { f(tau) g(t-tau) } dtau at t = t_val.

    Parameters
    ----------
    t_val : float
        Time point at which to evaluate the convolution.
    f, g : callable / interpolation objects
        The two distributions being convolved.
    n_integral : int
        Number of points used for the numerical integration.
    inverse_time : bool or None
        Passed through to the integrand construction (direction of time).
    return_log : bool
        If True, return the negative-log value; otherwise the probability.

    Returns
    -------
    tuple
        (value, -1): the convolution value (or its neg-log) and a
        placeholder second element.
    """
    # Build the integrand f(tau)*g(t-tau) as an interpolation object
    # (in log space when return_log is set).
    FG = _convolution_integrand(t_val, f, g, inverse_time, return_log)

    #integrate the interpolation object, return log, make neg_log
    #print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax))
    if (return_log and FG == ttconf.BIG_NUMBER) or \
       (not return_log and FG == 0.0):
        # Sentinel: the two distributions do not overlap at t_val, so the
        # convolution is effectively zero (BIG_NUMBER in neg-log space).
        res = ttconf.BIG_NUMBER #  we integrate log functions
    else:
        # integrate() with return_log=True yields the log of the integral;
        # negate to obtain the neg-log convention used throughout.
        res = -FG.integrate(a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True)

    if return_log:
        return res, -1
    else:
        return np.exp(-res), -1
Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau
Below is the the instruction that describes the task: ### Input: Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau ### Response: def _evaluate_convolution(t_val, f, g, n_integral = 100, inverse_time=None, return_log=False): """ Calculate convolution F(t) = int { f(tau)g(t-tau) } dtau """ FG = _convolution_integrand(t_val, f, g, inverse_time, return_log) #integrate the interpolation object, return log, make neg_log #print('FG:',FG.xmin, FG.xmax, FG(FG.xmin), FG(FG.xmax)) if (return_log and FG == ttconf.BIG_NUMBER) or \ (not return_log and FG == 0.0): # distributions do not overlap res = ttconf.BIG_NUMBER # we integrate log funcitons else: res = -FG.integrate(a=FG.xmin, b=FG.xmax, n=n_integral, return_log=True) if return_log: return res, -1 else: return np.exp(-res), -1
def compile_loaderplugin_entry(self, spec, entry):
    """
    Generic loader plugin entry handler.

    The default implementation assumes that everything up to the first
    '!' symbol resolves to some known loader plugin within the registry.
    The registry instance responsible for the resolution of the loader
    plugin handlers must be available in the spec under
    CALMJS_LOADERPLUGIN_REGISTRY
    """
    modname, source, target, modpath = entry
    registry = spec[CALMJS_LOADERPLUGIN_REGISTRY]
    handler = registry.get(modname)
    if not handler:
        # No handler registered for this plugin name: produce empty maps
        # and an empty module list.
        logger.warning(
            "no loaderplugin handler found for plugin entry '%s'", modname)
        return {}, {}, []
    return handler(self, spec, modname, source, target, modpath)
Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY
Below is the the instruction that describes the task: ### Input: Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY ### Response: def compile_loaderplugin_entry(self, spec, entry): """ Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY """ modname, source, target, modpath = entry handler = spec[CALMJS_LOADERPLUGIN_REGISTRY].get(modname) if handler: return handler(self, spec, modname, source, target, modpath) logger.warning( "no loaderplugin handler found for plugin entry '%s'", modname) return {}, {}, []
def decode(data):
    '''
    Decode a cashaddr string into its raw payload bytes.

    str -> bytes

    Raises:
        ValueError: if the current network has no cashaddr prefix, the
            prefix is missing from ``data``, or the checksum is invalid.
    '''
    if riemann.network.CASHADDR_PREFIX is None:
        raise ValueError('Network {} does not support cashaddresses.'
                         .format(riemann.get_current_network_name()))
    if data.find(riemann.network.CASHADDR_PREFIX) != 0:
        # Bug fix: this previously read `riemann.netowrk` (typo), which
        # raised AttributeError instead of the intended ValueError.
        raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
                         .format(riemann.network.CASHADDR_PREFIX))

    # the data is everything after the colon
    prefix, data = data.split(':')
    decoded = b32decode(data)
    if not verify_checksum(prefix, decoded):
        raise ValueError('Bad cash address checksum')

    # Regroup from 5-bit symbols to 8-bit bytes and drop the trailing
    # checksum portion.
    converted = convertbits(decoded, 5, 8)
    return bytes(converted[:-6])
str -> bytes
Below is the the instruction that describes the task: ### Input: str -> bytes ### Response: def decode(data): ''' str -> bytes ''' if riemann.network.CASHADDR_PREFIX is None: raise ValueError('Network {} does not support cashaddresses.' .format(riemann.get_current_network_name())) if data.find(riemann.network.CASHADDR_PREFIX) != 0: raise ValueError('Malformed cashaddr. Cannot locate prefix: {}' .format(riemann.netowrk.CASHADDR_PREFIX)) # the data is everything after the colon prefix, data = data.split(':') decoded = b32decode(data) if not verify_checksum(prefix, decoded): raise ValueError('Bad cash address checksum') converted = convertbits(decoded, 5, 8) return bytes(converted[:-6])
def get_learning_rate(self, iter):
    '''
    Get learning rate with exponential decay based on current iteration.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate
    '''
    # One decay step per completed interval of `iter_interval` iterations.
    decay_steps = iter // self.iter_interval
    return self.init_lr * self.gamma ** decay_steps
Get learning rate with exponential decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate
Below is the the instruction that describes the task: ### Input: Get learning rate with exponential decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate ### Response: def get_learning_rate(self, iter): ''' Get learning rate with exponential decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate ''' return self.init_lr * (self.gamma ** (iter // self.iter_interval))
def _show_stat_wrapper_multi_Progress(count, last_count, start_time, max_count, speed_calc_cycles, width, q, last_speed, prepend, show_stat_function, len_, add_args, lock, info_line, no_move_up=False): """ call the static method show_stat_wrapper for each process """ # print(ESC_BOLD, end='') # sys.stdout.flush() for i in range(len_): _show_stat_wrapper_Progress(count[i], last_count[i], start_time[i], max_count[i], speed_calc_cycles, width, q[i], last_speed[i], prepend[i], show_stat_function, add_args, i, lock[i]) n = len_ if info_line is not None: s = info_line.value.decode('utf-8') s = s.split('\n') n += len(s) for si in s: if width == 'auto': width = get_terminal_width() if len(si) > width: si = si[:width] print("{0:<{1}}".format(si, width)) if no_move_up: n = 0 # this is only a hack to find the end # of the message in a stream # so ESC_HIDDEN+ESC_NO_CHAR_ATTR is a magic ending print(terminal.ESC_MOVE_LINE_UP(n) + terminal.ESC_MY_MAGIC_ENDING, end='') sys.stdout.flush()
call the static method show_stat_wrapper for each process
Below is the the instruction that describes the task: ### Input: call the static method show_stat_wrapper for each process ### Response: def _show_stat_wrapper_multi_Progress(count, last_count, start_time, max_count, speed_calc_cycles, width, q, last_speed, prepend, show_stat_function, len_, add_args, lock, info_line, no_move_up=False): """ call the static method show_stat_wrapper for each process """ # print(ESC_BOLD, end='') # sys.stdout.flush() for i in range(len_): _show_stat_wrapper_Progress(count[i], last_count[i], start_time[i], max_count[i], speed_calc_cycles, width, q[i], last_speed[i], prepend[i], show_stat_function, add_args, i, lock[i]) n = len_ if info_line is not None: s = info_line.value.decode('utf-8') s = s.split('\n') n += len(s) for si in s: if width == 'auto': width = get_terminal_width() if len(si) > width: si = si[:width] print("{0:<{1}}".format(si, width)) if no_move_up: n = 0 # this is only a hack to find the end # of the message in a stream # so ESC_HIDDEN+ESC_NO_CHAR_ATTR is a magic ending print(terminal.ESC_MOVE_LINE_UP(n) + terminal.ESC_MY_MAGIC_ENDING, end='') sys.stdout.flush()
def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True):
    """Performs a mouse drag (mouse movement while a button is held down)
    to a point on the screen, relative to its current position.

    Args:
      xOffset (int, float, None, tuple, optional): How far left (for
        negative values) or right (for positive values) to move the
        cursor. 0 by default. If tuple, this is used for xOffset and
        yOffset.
      yOffset (int, float, None, optional): How far up (for negative
        values) or down (for positive values) to move the cursor. 0 by
        default.
      duration (float, optional): The amount of time it takes to move the
        mouse cursor to the new xy coordinates. If 0, then the mouse
        cursor is moved instantaneously. 0.0 by default.
      tween (func, optional): The tweening function used if the duration
        is not 0. A linear tween is used by default. See the tweens.py
        file for details.
      button (str, int, optional): The mouse button clicked. Must be one
        of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left'
        by default.
      mouseDownUp (True, False): When true, the mouseUp/Down actions are
        not perfomed. Which allows dragging over multiple (small)
        actions. 'True' by default.

    Returns:
      None
    """
    # Treat None offsets as "no movement on this axis".
    if xOffset is None:
        xOffset = 0
    if yOffset is None:
        yOffset = 0
    # A single tuple/list argument carries both offsets.
    if type(xOffset) in (tuple, list):
        xOffset, yOffset = xOffset[0], xOffset[1]
    if xOffset == 0 and yOffset == 0:
        return # no-op case
    _failSafeCheck()
    mousex, mousey = platformModule._position()
    if mouseDownUp:
        mouseDown(button=button, _pause=False)
    _mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button)
    if mouseDownUp:
        mouseUp(button=button, _pause=False)
    _autoPause(pause, _pause)
Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None
Below is the the instruction that describes the task: ### Input: Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None ### Response: def dragRel(xOffset=0, yOffset=0, duration=0.0, tween=linear, button='left', pause=None, _pause=True, mouseDownUp=True): """Performs a mouse drag (mouse movement while a button is held down) to a point on the screen, relative to its current position. The x and y parameters detail where the mouse event happens. If None, the current mouse position is used. If a float value, it is rounded down. If outside the boundaries of the screen, the event happens at edge of the screen. Args: x (int, float, None, tuple, optional): How far left (for negative values) or right (for positive values) to move the cursor. 
0 by default. If tuple, this is used for xOffset and yOffset. y (int, float, None, optional): How far up (for negative values) or down (for positive values) to move the cursor. 0 by default. duration (float, optional): The amount of time it takes to move the mouse cursor to the new xy coordinates. If 0, then the mouse cursor is moved instantaneously. 0.0 by default. tween (func, optional): The tweening function used if the duration is not 0. A linear tween is used by default. See the tweens.py file for details. button (str, int, optional): The mouse button clicked. Must be one of 'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by default. mouseDownUp (True, False): When true, the mouseUp/Down actions are not perfomed. Which allows dragging over multiple (small) actions. 'True' by default. Returns: None """ if xOffset is None: xOffset = 0 if yOffset is None: yOffset = 0 if type(xOffset) in (tuple, list): xOffset, yOffset = xOffset[0], xOffset[1] if xOffset == 0 and yOffset == 0: return # no-op case _failSafeCheck() mousex, mousey = platformModule._position() if mouseDownUp: mouseDown(button=button, _pause=False) _mouseMoveDrag('drag', mousex, mousey, xOffset, yOffset, duration, tween, button) if mouseDownUp: mouseUp(button=button, _pause=False) _autoPause(pause, _pause)
def _maybe_refresh_metadata(self, wakeup=False):
    """Send a metadata request if needed.

    Returns:
        int: milliseconds until next refresh
    """
    # If the cached metadata is still fresh, or a refresh is already in
    # flight, just report how long to wait.
    ttl = self.cluster.ttl()
    wait_for_in_progress_ms = self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0
    metadata_timeout = max(ttl, wait_for_in_progress_ms)
    if metadata_timeout > 0:
        return metadata_timeout

    # Beware that the behavior of this method and the computation of
    # timeouts for poll() are highly dependent on the behavior of
    # least_loaded_node()
    node_id = self.least_loaded_node()
    if node_id is None:
        log.debug("Give up sending metadata request since no node is available");
        return self.config['reconnect_backoff_ms']

    if self._can_send_request(node_id):
        topics = list(self._topics)
        if not topics and self.cluster.is_bootstrap(node_id):
            topics = list(self.config['bootstrap_topics_filter'])

        # An empty topic list (pre-0.10) or None (0.10+) requests metadata
        # for all topics.
        if self.cluster.need_all_topic_metadata or not topics:
            topics = [] if self.config['api_version'] < (0, 10) else None
        api_version = 0 if self.config['api_version'] < (0, 10) else 1
        request = MetadataRequest[api_version](topics)
        log.debug("Sending metadata request %s to node %s", request, node_id)
        future = self.send(node_id, request, wakeup=wakeup)
        future.add_callback(self.cluster.update_metadata)
        future.add_errback(self.cluster.failed_update)

        self._metadata_refresh_in_progress = True
        def refresh_done(val_or_error):
            # Clear the in-progress flag on success or failure alike.
            self._metadata_refresh_in_progress = False
        future.add_callback(refresh_done)
        future.add_errback(refresh_done)
        return self.config['request_timeout_ms']

    # If there's any connection establishment underway, wait until it completes. This prevents
    # the client from unnecessarily connecting to additional nodes while a previous connection
    # attempt has not been completed.
    if self._connecting:
        return self.config['reconnect_backoff_ms']

    if self.maybe_connect(node_id, wakeup=wakeup):
        log.debug("Initializing connection to node %s for metadata request", node_id)
        return self.config['reconnect_backoff_ms']

    # connected, but can't send more OR connecting
    # In either case, we just need to wait for a network event to let us know the selected
    # connection might be usable again.
    return float('inf')
Send a metadata request if needed. Returns: int: milliseconds until next refresh
Below is the the instruction that describes the task: ### Input: Send a metadata request if needed. Returns: int: milliseconds until next refresh ### Response: def _maybe_refresh_metadata(self, wakeup=False): """Send a metadata request if needed. Returns: int: milliseconds until next refresh """ ttl = self.cluster.ttl() wait_for_in_progress_ms = self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0 metadata_timeout = max(ttl, wait_for_in_progress_ms) if metadata_timeout > 0: return metadata_timeout # Beware that the behavior of this method and the computation of # timeouts for poll() are highly dependent on the behavior of # least_loaded_node() node_id = self.least_loaded_node() if node_id is None: log.debug("Give up sending metadata request since no node is available"); return self.config['reconnect_backoff_ms'] if self._can_send_request(node_id): topics = list(self._topics) if not topics and self.cluster.is_bootstrap(node_id): topics = list(self.config['bootstrap_topics_filter']) if self.cluster.need_all_topic_metadata or not topics: topics = [] if self.config['api_version'] < (0, 10) else None api_version = 0 if self.config['api_version'] < (0, 10) else 1 request = MetadataRequest[api_version](topics) log.debug("Sending metadata request %s to node %s", request, node_id) future = self.send(node_id, request, wakeup=wakeup) future.add_callback(self.cluster.update_metadata) future.add_errback(self.cluster.failed_update) self._metadata_refresh_in_progress = True def refresh_done(val_or_error): self._metadata_refresh_in_progress = False future.add_callback(refresh_done) future.add_errback(refresh_done) return self.config['request_timeout_ms'] # If there's any connection establishment underway, wait until it completes. This prevents # the client from unnecessarily connecting to additional nodes while a previous connection # attempt has not been completed. 
if self._connecting: return self.config['reconnect_backoff_ms'] if self.maybe_connect(node_id, wakeup=wakeup): log.debug("Initializing connection to node %s for metadata request", node_id) return self.config['reconnect_backoff_ms'] # connected but can't send more, OR connecting # In either case we just need to wait for a network event # to let us know the selected connection might be usable again. return float('inf')
def serialize_operator_not_equal(self, op):
    """
    Serializer for :meth:`SpiffWorkflow.operators.NotEqual`.

    Example::

        <not-equals>
            <value>text</value>
            <value><attribute>foobar</attribute></value>
            <value><path>foobar</path></value>
        </not-equals>
    """
    # Build the wrapper element, then let the shared value-list
    # serializer fill in one <value> child per operand.
    root = etree.Element('not-equals')
    return self.serialize_value_list(root, op.args)
Serializer for :meth:`SpiffWorkflow.operators.NotEqual`. Example:: <not-equals> <value>text</value> <value><attribute>foobar</attribute></value> <value><path>foobar</path></value> </not-equals>
Below is the the instruction that describes the task: ### Input: Serializer for :meth:`SpiffWorkflow.operators.NotEqual`. Example:: <not-equals> <value>text</value> <value><attribute>foobar</attribute></value> <value><path>foobar</path></value> </not-equals> ### Response: def serialize_operator_not_equal(self, op): """ Serializer for :meth:`SpiffWorkflow.operators.NotEqual`. Example:: <not-equals> <value>text</value> <value><attribute>foobar</attribute></value> <value><path>foobar</path></value> </not-equals> """ elem = etree.Element('not-equals') return self.serialize_value_list(elem, op.args)
def _improve_class_docs(app, cls, lines):
    """Improve the documentation of a class.

    Dispatches on the kind of class: Django model classes get their
    fields documented as parameters, Django form classes get their
    form fields appended.  Other classes are left untouched.
    """
    if issubclass(cls, models.Model):
        _add_model_fields_as_params(app, cls, lines)
        return
    if issubclass(cls, forms.Form):
        _add_form_fields(cls, lines)
Improve the documentation of a class.
Below is the instruction that describes the task: ### Input: Improve the documentation of a class. ### Response: def _improve_class_docs(app, cls, lines): """Improve the documentation of a class.""" if issubclass(cls, models.Model): _add_model_fields_as_params(app, cls, lines) elif issubclass(cls, forms.Form): _add_form_fields(cls, lines)
def set_author(voevent, title=None, shortName=None, logoURL=None,
               contactName=None, contactEmail=None, contactPhone=None,
               contributor=None):
    """For setting fields in the detailed author description.

    This can optionally be neglected if a well defined AuthorIVORN is
    supplied.

    .. note:: Unusually for this library, the args here use CamelCase
        naming convention, since there's a direct mapping to the
        ``Author.*`` attributes to which they will be assigned.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.

    The rest of the arguments are strings corresponding to child elements.
    """
    # Explicit mapping from keyword arguments to Who.Author child
    # elements.  This replaces the previous locals() introspection,
    # which silently broke if any local variable was defined before the
    # snapshot was taken.  The keys must match the parameter names
    # exactly, in declaration order, to preserve element ordering.
    author_children = {
        'title': title,
        'shortName': shortName,
        'logoURL': logoURL,
        'contactName': contactName,
        'contactEmail': contactEmail,
        'contactPhone': contactPhone,
        'contributor': contributor,
    }
    # Create the Author element only if it does not exist yet.
    if not voevent.xpath('Who/Author'):
        etree.SubElement(voevent.Who, 'Author')
    # Assign only the fields the caller actually supplied.
    for k, v in author_children.items():
        if v is not None:
            voevent.Who.Author[k] = v
For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements.
Below is the the instruction that describes the task: ### Input: For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements. ### Response: def set_author(voevent, title=None, shortName=None, logoURL=None, contactName=None, contactEmail=None, contactPhone=None, contributor=None): """For setting fields in the detailed author description. This can optionally be neglected if a well defined AuthorIVORN is supplied. .. note:: Unusually for this library, the args here use CamelCase naming convention, since there's a direct mapping to the ``Author.*`` attributes to which they will be assigned. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. The rest of the arguments are strings corresponding to child elements. """ # We inspect all local variables except the voevent packet, # Cycling through and assigning them on the Who.Author element. AuthChildren = locals() AuthChildren.pop('voevent') if not voevent.xpath('Who/Author'): etree.SubElement(voevent.Who, 'Author') for k, v in AuthChildren.items(): if v is not None: voevent.Who.Author[k] = v
def new_scansock (self):
    """Return a new socket connected to clamd's stream-scanning port.

    Sends the STREAM command on the existing control socket, parses the
    "PORT <n>" reply to learn the per-stream port, then opens and returns
    a second TCP socket connected to that port; scan data is written to
    the returned socket.

    Raises ClamavError if clamd never announces a port.  Socket errors
    are re-raised after closing the affected socket.
    """
    port = None
    try:
        # Ask clamd to open a dedicated data channel.
        # NOTE(review): str payloads suggest Python-2-era code; on
        # Python 3 sendall/find would need bytes -- confirm target version.
        self.sock.sendall("STREAM")
        port = None
        # Poll the control socket (up to 60 reads) for the "PORT <n>"
        # announcement.
        for dummy in range(60):
            data = self.sock.recv(self.sock_rcvbuf)
            i = data.find("PORT")
            if i != -1:
                # The number starts right after "PORT " (5 chars in).
                port = int(data[i+5:])
                break
    except socket.error:
        # Control channel failed; close it before propagating.
        self.sock.close()
        raise
    if port is None:
        raise ClamavError(_("clamd is not ready for stream scanning"))
    # Resolve the stream endpoint on the same host and connect to it.
    sockinfo = get_sockinfo(self.host, port=port)
    wsock = create_socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        wsock.connect(sockinfo[0][4])
    except socket.error:
        # Don't leak the half-open data socket on connect failure.
        wsock.close()
        raise
    return wsock
Return a connected socket for sending scan data to it.
Below is the the instruction that describes the task: ### Input: Return a connected socket for sending scan data to it. ### Response: def new_scansock (self): """Return a connected socket for sending scan data to it.""" port = None try: self.sock.sendall("STREAM") port = None for dummy in range(60): data = self.sock.recv(self.sock_rcvbuf) i = data.find("PORT") if i != -1: port = int(data[i+5:]) break except socket.error: self.sock.close() raise if port is None: raise ClamavError(_("clamd is not ready for stream scanning")) sockinfo = get_sockinfo(self.host, port=port) wsock = create_socket(socket.AF_INET, socket.SOCK_STREAM) try: wsock.connect(sockinfo[0][4]) except socket.error: wsock.close() raise return wsock
def process_tag(node):
    """
    Recursively convert a tag's children, then apply the converter for
    the tag itself, if a ``convert_<tagname>`` function exists in this
    module.  Children of tags listed in ``exceptions`` are not recursed
    into (their text nodes are still collected).
    """
    exceptions = ['table']
    pieces = []
    for child in node.children:
        if isinstance(child, NavigableString):
            pieces.append(child)
        elif node.name not in exceptions:
            pieces.append(process_tag(child))
    text = ''.join(pieces)
    # Look up a tag-specific converter by name; silently skip tags
    # without one (the KeyError from the globals() lookup).
    try:
        convert_fn = globals()["convert_%s" % node.name.lower()]
        text = convert_fn(node, text)
    except KeyError:
        pass
    return text
Recursively go through a tag's children, converting them, then convert the tag itself.
Below is the the instruction that describes the task: ### Input: Recursively go through a tag's children, converting them, then convert the tag itself. ### Response: def process_tag(node): """ Recursively go through a tag's children, converting them, then convert the tag itself. """ text = '' exceptions = ['table'] for element in node.children: if isinstance(element, NavigableString): text += element elif not node.name in exceptions: text += process_tag(element) try: convert_fn = globals()["convert_%s" % node.name.lower()] text = convert_fn(node, text) except KeyError: pass return text
def write(self, group_id, handle):
    '''Write this parameter group, with parameters, to a file handle.

    Parameters
    ----------
    group_id : int
        The numerical ID of the group.
    handle : file handle
        An open, writable, binary file handle.
    '''
    encoded_name = self.name.encode('utf-8')
    encoded_desc = self.desc.encode('utf-8')
    # Group header: name length, then the (negated) group id marks this
    # record as a group rather than a parameter.
    handle.write(struct.pack('bb', len(encoded_name), -group_id))
    handle.write(encoded_name)
    # Offset to the next block: 2-byte pointer + 1-byte desc length + desc.
    handle.write(struct.pack('<h', 3 + len(encoded_desc)))
    handle.write(struct.pack('B', len(encoded_desc)))
    handle.write(encoded_desc)
    # Each parameter serializes itself under the same group id.
    for parameter in self.params.values():
        parameter.write(group_id, handle)
Write this parameter group, with parameters, to a file handle. Parameters ---------- group_id : int The numerical ID of the group. handle : file handle An open, writable, binary file handle.
Below is the the instruction that describes the task: ### Input: Write this parameter group, with parameters, to a file handle. Parameters ---------- group_id : int The numerical ID of the group. handle : file handle An open, writable, binary file handle. ### Response: def write(self, group_id, handle): '''Write this parameter group, with parameters, to a file handle. Parameters ---------- group_id : int The numerical ID of the group. handle : file handle An open, writable, binary file handle. ''' name = self.name.encode('utf-8') desc = self.desc.encode('utf-8') handle.write(struct.pack('bb', len(name), -group_id)) handle.write(name) handle.write(struct.pack('<h', 3 + len(desc))) handle.write(struct.pack('B', len(desc))) handle.write(desc) for param in self.params.values(): param.write(group_id, handle)
def delete(self, key):
    """Remove the given key from the cache (no-op in debug mode)."""
    if self.debug:
        return
    self.database.delete(self.make_key(key))
Remove the given key from the cache.
Below is the instruction that describes the task: ### Input: Remove the given key from the cache. ### Response: def delete(self, key): """Remove the given key from the cache.""" if not self.debug: self.database.delete(self.make_key(key))
def fit(self, X, y=None):
    """Fit Preprocessing to X.

    Parameters
    ----------
    sequence : array-like, [sequence_length, n_features]
        A multivariate timeseries.
    y : None
        Ignored

    Returns
    -------
    self
    """
    # Stack all sequences along the time axis and delegate to the
    # incremental fitting path.
    stacked = np.concatenate(X, axis=0)
    return self.partial_fit(stacked)
Fit Preprocessing to X. Parameters ---------- sequence : array-like, [sequence_length, n_features] A multivariate timeseries. y : None Ignored Returns ------- self
Below is the the instruction that describes the task: ### Input: Fit Preprocessing to X. Parameters ---------- sequence : array-like, [sequence_length, n_features] A multivariate timeseries. y : None Ignored Returns ------- self ### Response: def fit(self, X, y=None): """Fit Preprocessing to X. Parameters ---------- sequence : array-like, [sequence_length, n_features] A multivariate timeseries. y : None Ignored Returns ------- self """ return self.partial_fit(np.concatenate(X, axis=0))
def plot_grid(images, slices=None, axes=2,
              # general figure arguments
              figsize=1., rpad=0, cpad=0,
              # title arguments
              title=None, tfontsize=20, title_dx=0, title_dy=0,
              # row arguments
              rlabels=None, rfontsize=14, rfontcolor='white', rfacecolor='black',
              # column arguments
              clabels=None, cfontsize=14, cfontcolor='white', cfacecolor='black',
              # save arguments
              filename=None, dpi=400, transparent=True,
              # other args
              **kwargs):
    """
    Plot a collection of images in an arbitrarily-defined grid.

    Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html

    Arguments
    ---------
    images : list of ANTsImage types (or np.ndarray / nested list of them)
        image(s) to plot.  If one flat list is given it is treated as a
        single row; otherwise the nested-list shape defines the grid.
    slices : integer or (nested) list of integers
        slice indices to plot.  One integer applies to every image;
        a nested list must match the shape of `images`.
    axes : integer or (nested) list of integers
        axis along which to slice each image (one value for all, or a
        nested list matching the grid shape).
    figsize, rpad, cpad : float
        overall scale factor, and extra padding before rows / columns.
    title, tfontsize, title_dx, title_dy :
        figure title, its font size, and x/y nudges of its position.
    rlabels, rfontsize, rfontcolor, rfacecolor :
        per-row labels and their text/background styling.
    clabels, cfontsize, cfontcolor, cfacecolor :
        per-column labels and their text/background styling.
    filename, dpi, transparent :
        if `filename` is given, save the figure instead of showing it.

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> mni1 = ants.image_read(ants.get_data('mni'))
    >>> mni2 = mni1.smooth_image(1.)
    >>> images = np.asarray([[mni1, mni2]])
    >>> slices = np.asarray([[100, 100]])
    >>> ants.plot_grid(images=images, slices=slices, title='1x2 Grid')
    """
    # -- small 2D-array orientation helpers -------------------------------
    def mirror_matrix(x):
        return x[::-1,:]
    def rotate270_matrix(x):
        return mirror_matrix(x.T)
    def rotate180_matrix(x):
        return x[::-1,:]
    def rotate90_matrix(x):
        return mirror_matrix(x).T
    def flip_matrix(x):
        return mirror_matrix(rotate180_matrix(x))
    def reorient_slice(x, axis):
        # Rotate every slice into display orientation; axis 1 slices get
        # an extra mirror.  NOTE(review): source formatting was collapsed;
        # the grouping of the mirror under the axis==1 branch is inferred
        # from the statement order -- confirm against upstream.
        if (axis != 1):
            x = rotate90_matrix(x)
        if (axis == 1):
            x = rotate90_matrix(x)
            x = mirror_matrix(x)
        return x
    def slice_image(img, axis, idx):
        # Extract a 2D slice from a 3D image along the requested axis;
        # negative axes mirror numpy's axis addressing.
        if axis == 0:
            return img[idx,:,:]
        elif axis == 1:
            return img[:,idx,:]
        elif axis == 2:
            return img[:,:,idx]
        elif axis == -1:
            return img[:,:,idx]
        elif axis == -2:
            return img[:,idx,:]
        elif axis == -3:
            return img[idx,:,:]
        else:
            raise ValueError('axis %i not valid' % axis)

    # -- normalize `images` into a nested (row-major) list of lists -------
    if isinstance(images, np.ndarray):
        images = images.tolist()
    if not isinstance(images, list):
        raise ValueError('images argument must be of type list')
    if not isinstance(images[0], list):
        images = [images]

    # -- normalize `slices`: a bare int applies to all images -------------
    # NOTE(review): if `slices` is None (the default) or any other type,
    # `one_slice` is never assigned and the loop below raises NameError;
    # callers appear to always pass an int/list/ndarray -- confirm.
    if isinstance(slices, int):
        one_slice = True
    if isinstance(slices, np.ndarray):
        slices = slices.tolist()
    if isinstance(slices, list):
        one_slice = False
        if not isinstance(slices[0], list):
            slices = [slices]
        nslicerow = len(slices)
        nslicecol = len(slices[0])

    nrow = len(images)
    ncol = len(images[0])

    if rlabels is None:
        rlabels = [None]*nrow
    if clabels is None:
        clabels = [None]*ncol

    # Per-image slice grids must match the image grid shape exactly.
    if (not one_slice):
        if (nrow != nslicerow) or (ncol != nslicecol):
            raise ValueError('`images` arg shape (%i,%i) must equal `slices` arg shape (%i,%i)!' % (nrow,ncol,nslicerow,nslicecol))

    fig = plt.figure(figsize=((ncol+1)*2.5*figsize, (nrow+1)*2.5*figsize))

    if title is not None:
        # Leave extra headroom for the title when column labels exist.
        basex = 0.5
        basey = 0.9 if clabels[0] is None else 0.95
        fig.suptitle(title, fontsize=tfontsize, x=basex+title_dx, y=basey+title_dy)

    # When both paddings are requested, GridSpec's single wspace carries
    # the larger of the two and the individual pads are zeroed.
    if (cpad > 0) and (rpad > 0):
        bothgridpad = max(cpad, rpad)
        cpad = 0
        rpad = 0
    else:
        bothgridpad = 0.0

    gs = gridspec.GridSpec(nrow, ncol,
                           wspace=bothgridpad, hspace=0.0,
                           top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1) + cpad,
                           left=0.5/(ncol+1) + rpad, right=1-0.5/(ncol+1))

    for rowidx in range(nrow):
        for colidx in range(ncol):
            ax = plt.subplot(gs[rowidx, colidx])

            # Row label: drawn once per row, in the first column.
            if colidx == 0:
                if rlabels[rowidx] is not None:
                    bottom, height = .25, .5
                    top = bottom + height
                    # add label text
                    ax.text(-0.07, 0.5*(bottom+top), rlabels[rowidx],
                            horizontalalignment='right', verticalalignment='center',
                            rotation='vertical', transform=ax.transAxes,
                            color=rfontcolor, fontsize=rfontsize)
                    # add label background
                    extra = 0.3 if rowidx == 0 else 0.0
                    rect = patches.Rectangle((-0.3, 0), 0.3, 1.0+extra,
                                             facecolor=rfacecolor, alpha=1.,
                                             transform=ax.transAxes, clip_on=False)
                    ax.add_patch(rect)

            # Column label: drawn once per column, in the first row.
            if rowidx == 0:
                if clabels[colidx] is not None:
                    bottom, height = .25, .5
                    left, width = .25, .5
                    right = left + width
                    top = bottom + height
                    ax.text(0.5*(left+right), 0.09+top+bottom, clabels[colidx],
                            horizontalalignment='center', verticalalignment='center',
                            rotation='horizontal', transform=ax.transAxes,
                            color=cfontcolor, fontsize=cfontsize)
                    # add label background
                    rect = patches.Rectangle((0, 1.), 1.0, 0.3,
                                             facecolor=cfacecolor, alpha=1.,
                                             transform=ax.transAxes, clip_on=False)
                    ax.add_patch(rect)

            # Pick the image, axis, and slice index for this grid cell.
            tmpimg = images[rowidx][colidx]
            if isinstance(axes, int):
                tmpaxis = axes
            else:
                tmpaxis = axes[rowidx][colidx]
            sliceidx = slices[rowidx][colidx] if not one_slice else slices
            tmpslice = slice_image(tmpimg, tmpaxis, sliceidx)
            tmpslice = reorient_slice(tmpslice, tmpaxis)
            ax.imshow(tmpslice, cmap='Greys_r', aspect='auto')
            ax.axis('off')

    if filename is not None:
        filename = os.path.expanduser(filename)
        plt.savefig(filename, dpi=dpi, transparent=transparent, bbox_inches='tight')
        plt.close(fig)
    else:
        plt.show()
Plot a collection of images in an arbitrarily-defined grid Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html Arguments --------- images : list of ANTsImage types image(s) to plot. if one image, this image will be used for all grid locations. if multiple images, they should be arrange in a list the same shape as the `gridsize` argument. slices : integer or list of integers slice indices to plot if one integer, this slice index will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument axes : integer or list of integers axis or axes along which to plot image slices if one integer, this axis will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument Example ------- >>> import ants >>> import numpy as np >>> mni1 = ants.image_read(ants.get_data('mni')) >>> mni2 = mni1.smooth_image(1.) >>> mni3 = mni1.smooth_image(2.) >>> mni4 = mni1.smooth_image(3.) >>> images = np.asarray([[mni1, mni2], ... [mni3, mni4]]) >>> slices = np.asarray([[100, 100], ... 
[100, 100]]) >>> #axes = np.asarray([[2,2],[2,2]]) >>> # standard plotting >>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid') >>> ants.plot_grid(images.reshape(1,4), slices.reshape(1,4), title='1x4 Grid') >>> ants.plot_grid(images.reshape(4,1), slices.reshape(4,1), title='4x1 Grid') >>> # Padding between rows and/or columns >>> ants.plot_grid(images, slices, cpad=0.02, title='Col Padding') >>> ants.plot_grid(images, slices, rpad=0.02, title='Row Padding') >>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding') >>> # Adding plain row and/or column labels >>> ants.plot_grid(images, slices, title='Adding Row Labels', rlabels=['Row #1', 'Row #2']) >>> ants.plot_grid(images, slices, title='Adding Col Labels', clabels=['Col #1', 'Col #2']) >>> ants.plot_grid(images, slices, title='Row and Col Labels', rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2']) >>> # Making a publication-quality image >>> images = np.asarray([[mni1, mni2, mni2], ... [mni3, mni4, mni4]]) >>> slices = np.asarray([[100, 100, 100], ... [100, 100, 100]]) >>> axes = np.asarray([[0, 1, 2], [0, 1, 2]]) >>> ants.plot_grid(images, slices, axes, title='Publication Figures with ANTsPy', tfontsize=20, title_dy=0.03, title_dx=-0.04, rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2', 'Col 3'], rfontsize=16, cfontsize=16)
Below is the the instruction that describes the task: ### Input: Plot a collection of images in an arbitrarily-defined grid Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html Arguments --------- images : list of ANTsImage types image(s) to plot. if one image, this image will be used for all grid locations. if multiple images, they should be arrange in a list the same shape as the `gridsize` argument. slices : integer or list of integers slice indices to plot if one integer, this slice index will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument axes : integer or list of integers axis or axes along which to plot image slices if one integer, this axis will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument Example ------- >>> import ants >>> import numpy as np >>> mni1 = ants.image_read(ants.get_data('mni')) >>> mni2 = mni1.smooth_image(1.) >>> mni3 = mni1.smooth_image(2.) >>> mni4 = mni1.smooth_image(3.) >>> images = np.asarray([[mni1, mni2], ... [mni3, mni4]]) >>> slices = np.asarray([[100, 100], ... 
[100, 100]]) >>> #axes = np.asarray([[2,2],[2,2]]) >>> # standard plotting >>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid') >>> ants.plot_grid(images.reshape(1,4), slices.reshape(1,4), title='1x4 Grid') >>> ants.plot_grid(images.reshape(4,1), slices.reshape(4,1), title='4x1 Grid') >>> # Padding between rows and/or columns >>> ants.plot_grid(images, slices, cpad=0.02, title='Col Padding') >>> ants.plot_grid(images, slices, rpad=0.02, title='Row Padding') >>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding') >>> # Adding plain row and/or column labels >>> ants.plot_grid(images, slices, title='Adding Row Labels', rlabels=['Row #1', 'Row #2']) >>> ants.plot_grid(images, slices, title='Adding Col Labels', clabels=['Col #1', 'Col #2']) >>> ants.plot_grid(images, slices, title='Row and Col Labels', rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2']) >>> # Making a publication-quality image >>> images = np.asarray([[mni1, mni2, mni2], ... [mni3, mni4, mni4]]) >>> slices = np.asarray([[100, 100, 100], ... [100, 100, 100]]) >>> axes = np.asarray([[0, 1, 2], [0, 1, 2]]) >>> ants.plot_grid(images, slices, axes, title='Publication Figures with ANTsPy', tfontsize=20, title_dy=0.03, title_dx=-0.04, rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2', 'Col 3'], rfontsize=16, cfontsize=16) ### Response: def plot_grid(images, slices=None, axes=2, # general figure arguments figsize=1., rpad=0, cpad=0, # title arguments title=None, tfontsize=20, title_dx=0, title_dy=0, # row arguments rlabels=None, rfontsize=14, rfontcolor='white', rfacecolor='black', # column arguments clabels=None, cfontsize=14, cfontcolor='white', cfacecolor='black', # save arguments filename=None, dpi=400, transparent=True, # other args **kwargs): """ Plot a collection of images in an arbitrarily-defined grid Matplotlib named colors: https://matplotlib.org/examples/color/named_colors.html Arguments --------- images : list of ANTsImage types image(s) to plot. 
if one image, this image will be used for all grid locations. if multiple images, they should be arrange in a list the same shape as the `gridsize` argument. slices : integer or list of integers slice indices to plot if one integer, this slice index will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument axes : integer or list of integers axis or axes along which to plot image slices if one integer, this axis will be used for all images if multiple integers, they should be arranged in a list the same shape as the `gridsize` argument Example ------- >>> import ants >>> import numpy as np >>> mni1 = ants.image_read(ants.get_data('mni')) >>> mni2 = mni1.smooth_image(1.) >>> mni3 = mni1.smooth_image(2.) >>> mni4 = mni1.smooth_image(3.) >>> images = np.asarray([[mni1, mni2], ... [mni3, mni4]]) >>> slices = np.asarray([[100, 100], ... [100, 100]]) >>> #axes = np.asarray([[2,2],[2,2]]) >>> # standard plotting >>> ants.plot_grid(images=images, slices=slices, title='2x2 Grid') >>> ants.plot_grid(images.reshape(1,4), slices.reshape(1,4), title='1x4 Grid') >>> ants.plot_grid(images.reshape(4,1), slices.reshape(4,1), title='4x1 Grid') >>> # Padding between rows and/or columns >>> ants.plot_grid(images, slices, cpad=0.02, title='Col Padding') >>> ants.plot_grid(images, slices, rpad=0.02, title='Row Padding') >>> ants.plot_grid(images, slices, rpad=0.02, cpad=0.02, title='Row and Col Padding') >>> # Adding plain row and/or column labels >>> ants.plot_grid(images, slices, title='Adding Row Labels', rlabels=['Row #1', 'Row #2']) >>> ants.plot_grid(images, slices, title='Adding Col Labels', clabels=['Col #1', 'Col #2']) >>> ants.plot_grid(images, slices, title='Row and Col Labels', rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2']) >>> # Making a publication-quality image >>> images = np.asarray([[mni1, mni2, mni2], ... [mni3, mni4, mni4]]) >>> slices = np.asarray([[100, 100, 100], ... 
[100, 100, 100]]) >>> axes = np.asarray([[0, 1, 2], [0, 1, 2]]) >>> ants.plot_grid(images, slices, axes, title='Publication Figures with ANTsPy', tfontsize=20, title_dy=0.03, title_dx=-0.04, rlabels=['Row 1', 'Row 2'], clabels=['Col 1', 'Col 2', 'Col 3'], rfontsize=16, cfontsize=16) """ def mirror_matrix(x): return x[::-1,:] def rotate270_matrix(x): return mirror_matrix(x.T) def rotate180_matrix(x): return x[::-1,:] def rotate90_matrix(x): return mirror_matrix(x).T def flip_matrix(x): return mirror_matrix(rotate180_matrix(x)) def reorient_slice(x, axis): if (axis != 1): x = rotate90_matrix(x) if (axis == 1): x = rotate90_matrix(x) x = mirror_matrix(x) return x def slice_image(img, axis, idx): if axis == 0: return img[idx,:,:] elif axis == 1: return img[:,idx,:] elif axis == 2: return img[:,:,idx] elif axis == -1: return img[:,:,idx] elif axis == -2: return img[:,idx,:] elif axis == -3: return img[idx,:,:] else: raise ValueError('axis %i not valid' % axis) if isinstance(images, np.ndarray): images = images.tolist() if not isinstance(images, list): raise ValueError('images argument must be of type list') if not isinstance(images[0], list): images = [images] if isinstance(slices, int): one_slice = True if isinstance(slices, np.ndarray): slices = slices.tolist() if isinstance(slices, list): one_slice = False if not isinstance(slices[0], list): slices = [slices] nslicerow = len(slices) nslicecol = len(slices[0]) nrow = len(images) ncol = len(images[0]) if rlabels is None: rlabels = [None]*nrow if clabels is None: clabels = [None]*ncol if (not one_slice): if (nrow != nslicerow) or (ncol != nslicecol): raise ValueError('`images` arg shape (%i,%i) must equal `slices` arg shape (%i,%i)!' 
% (nrow,ncol,nslicerow,nslicecol)) fig = plt.figure(figsize=((ncol+1)*2.5*figsize, (nrow+1)*2.5*figsize)) if title is not None: basex = 0.5 basey = 0.9 if clabels[0] is None else 0.95 fig.suptitle(title, fontsize=tfontsize, x=basex+title_dx, y=basey+title_dy) if (cpad > 0) and (rpad > 0): bothgridpad = max(cpad, rpad) cpad = 0 rpad = 0 else: bothgridpad = 0.0 gs = gridspec.GridSpec(nrow, ncol, wspace=bothgridpad, hspace=0.0, top=1.-0.5/(nrow+1), bottom=0.5/(nrow+1) + cpad, left=0.5/(ncol+1) + rpad, right=1-0.5/(ncol+1)) for rowidx in range(nrow): for colidx in range(ncol): ax = plt.subplot(gs[rowidx, colidx]) if colidx == 0: if rlabels[rowidx] is not None: bottom, height = .25, .5 top = bottom + height # add label text ax.text(-0.07, 0.5*(bottom+top), rlabels[rowidx], horizontalalignment='right', verticalalignment='center', rotation='vertical', transform=ax.transAxes, color=rfontcolor, fontsize=rfontsize) # add label background extra = 0.3 if rowidx == 0 else 0.0 rect = patches.Rectangle((-0.3, 0), 0.3, 1.0+extra, facecolor=rfacecolor, alpha=1., transform=ax.transAxes, clip_on=False) ax.add_patch(rect) if rowidx == 0: if clabels[colidx] is not None: bottom, height = .25, .5 left, width = .25, .5 right = left + width top = bottom + height ax.text(0.5*(left+right), 0.09+top+bottom, clabels[colidx], horizontalalignment='center', verticalalignment='center', rotation='horizontal', transform=ax.transAxes, color=cfontcolor, fontsize=cfontsize) # add label background rect = patches.Rectangle((0, 1.), 1.0, 0.3, facecolor=cfacecolor, alpha=1., transform=ax.transAxes, clip_on=False) ax.add_patch(rect) tmpimg = images[rowidx][colidx] if isinstance(axes, int): tmpaxis = axes else: tmpaxis = axes[rowidx][colidx] sliceidx = slices[rowidx][colidx] if not one_slice else slices tmpslice = slice_image(tmpimg, tmpaxis, sliceidx) tmpslice = reorient_slice(tmpslice, tmpaxis) ax.imshow(tmpslice, cmap='Greys_r', aspect='auto') ax.axis('off') if filename is not None: filename = 
os.path.expanduser(filename) plt.savefig(filename, dpi=dpi, transparent=transparent, bbox_inches='tight') plt.close(fig) else: plt.show()
def destroy_ssh_key(self, ssh_key_id):
    """
    This method will delete the SSH key from your account.

    Returns the 'status' field of the API response.
    """
    response = self.request('/ssh_keys/%s/destroy' % ssh_key_id, method='GET')
    return response.get('status')
This method will delete the SSH key from your account.
Below is the instruction that describes the task: ### Input: This method will delete the SSH key from your account. ### Response: def destroy_ssh_key(self, ssh_key_id): """ This method will delete the SSH key from your account. """ json = self.request('/ssh_keys/%s/destroy' % ssh_key_id, method='GET') status = json.get('status') return status
def output(self, out_file):
    """Write the converted entries to out_file."""
    self.out_file = out_file
    # Header declaring the single cost event ('ns') before the entries.
    out_file.write('event: ns : Nanoseconds\n')
    out_file.write('events: ns\n')
    self._output_summary()
    # Emit entries in a stable order defined by the module-level sort key.
    for record in sorted(self.entries, key=_entry_sort_key):
        self._output_entry(record)
Write the converted entries to out_file
Below is the the instruction that describes the task: ### Input: Write the converted entries to out_file ### Response: def output(self, out_file): """Write the converted entries to out_file""" self.out_file = out_file out_file.write('event: ns : Nanoseconds\n') out_file.write('events: ns\n') self._output_summary() for entry in sorted(self.entries, key=_entry_sort_key): self._output_entry(entry)
def items(self):
    """return all the app_names and their values as tuples"""
    sql = """
        SELECT
            app_name,
            next_run,
            first_run,
            last_run,
            last_success,
            depends_on,
            error_count,
            last_error
        FROM crontabber"""
    # Column order must mirror the SELECT list above.
    columns = (
        'app_name', 'next_run', 'first_run', 'last_run',
        'last_success', 'depends_on', 'error_count', 'last_error'
    )
    result = []
    for record in self.transaction_executor(execute_query_fetchall, sql):
        data = dict(zip(columns, record))
        # app_name becomes the tuple key; the rest stays in the dict.
        result.append((data.pop('app_name'), data))
    return result
return all the app_names and their values as tuples
Below is the the instruction that describes the task: ### Input: return all the app_names and their values as tuples ### Response: def items(self): """return all the app_names and their values as tuples""" sql = """ SELECT app_name, next_run, first_run, last_run, last_success, depends_on, error_count, last_error FROM crontabber""" columns = ( 'app_name', 'next_run', 'first_run', 'last_run', 'last_success', 'depends_on', 'error_count', 'last_error' ) items = [] for record in self.transaction_executor(execute_query_fetchall, sql): row = dict(zip(columns, record)) items.append((row.pop('app_name'), row)) return items
def substitute_partner(self, state, partners_recp, recp, alloc_id):
    '''
    Establish the partnership to recp and, when it is successful
    remove partner with recipient partners_recp.

    Use with caution: The partner which we are removing is not notified
    in any way, so he still keeps the link in his description.
    The correct usage of this method requires calling it from both
    agents which are divorcing.
    '''
    partner = state.partners.find(recipient.IRecipient(partners_recp))
    if not partner:
        # Fixed typo in the error message ("subsitute" -> "substitute").
        msg = ('substitute_partner() did not find the partner %r'
               % partners_recp)
        self.error(msg)
        return fiber.fail(partners.FindPartnerError(msg))
    return self.establish_partnership(recp, partner.allocation_id,
                                      alloc_id, substitute=partner)
Establish the partnership to recp and, when it is successfull remove partner with recipient partners_recp. Use with caution: The partner which we are removing is not notified in any way, so he still keeps link in his description. The correct usage of this method requires calling it from two agents which are divorcing.
Below is the the instruction that describes the task: ### Input: Establish the partnership to recp and, when it is successfull remove partner with recipient partners_recp. Use with caution: The partner which we are removing is not notified in any way, so he still keeps link in his description. The correct usage of this method requires calling it from two agents which are divorcing. ### Response: def substitute_partner(self, state, partners_recp, recp, alloc_id): ''' Establish the partnership to recp and, when it is successfull remove partner with recipient partners_recp. Use with caution: The partner which we are removing is not notified in any way, so he still keeps link in his description. The correct usage of this method requires calling it from two agents which are divorcing. ''' partner = state.partners.find(recipient.IRecipient(partners_recp)) if not partner: msg = 'subsitute_partner() did not find the partner %r' %\ partners_recp self.error(msg) return fiber.fail(partners.FindPartnerError(msg)) return self.establish_partnership(recp, partner.allocation_id, alloc_id, substitute=partner)
def do_info(self, arg, arguments): """ :: Usage: info [--all] Options: --all -a more extensive information Prints some internal information about the shell """ if arguments["--all"]: Console.ok(70 * "-") Console.ok('DIR') Console.ok(70 * "-") for element in dir(self): Console.ok(str(element)) Console.ok(70 * "-") self.print_info()
:: Usage: info [--all] Options: --all -a more extensive information Prints some internal information about the shell
Below is the the instruction that describes the task: ### Input: :: Usage: info [--all] Options: --all -a more extensive information Prints some internal information about the shell ### Response: def do_info(self, arg, arguments): """ :: Usage: info [--all] Options: --all -a more extensive information Prints some internal information about the shell """ if arguments["--all"]: Console.ok(70 * "-") Console.ok('DIR') Console.ok(70 * "-") for element in dir(self): Console.ok(str(element)) Console.ok(70 * "-") self.print_info()
def get_power_state(self, userid): """Get power status of a z/VM instance.""" LOG.debug('Querying power stat of %s' % userid) requestData = "PowerVM " + userid + " status" action = "query power state of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(requestData) with zvmutils.expect_invalid_resp_data(results): status = results['response'][0].partition(': ')[2] return status
Get power status of a z/VM instance.
Below is the the instruction that describes the task: ### Input: Get power status of a z/VM instance. ### Response: def get_power_state(self, userid): """Get power status of a z/VM instance.""" LOG.debug('Querying power stat of %s' % userid) requestData = "PowerVM " + userid + " status" action = "query power state of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(requestData) with zvmutils.expect_invalid_resp_data(results): status = results['response'][0].partition(': ')[2] return status
def makepipecomponent(idf, pname): """make a pipe component generate inlet outlet names""" apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname) apipe.Inlet_Node_Name = "%s_inlet" % (pname,) apipe.Outlet_Node_Name = "%s_outlet" % (pname,) return apipe
make a pipe component generate inlet outlet names
Below is the the instruction that describes the task: ### Input: make a pipe component generate inlet outlet names ### Response: def makepipecomponent(idf, pname): """make a pipe component generate inlet outlet names""" apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname) apipe.Inlet_Node_Name = "%s_inlet" % (pname,) apipe.Outlet_Node_Name = "%s_outlet" % (pname,) return apipe
def update_state(self): """ Update state with latest info from Wink API. """ response = self.api_interface.get_device_state(self, id_override=self.parent_id(), type_override=self.parent_object_type()) self._update_state_from_response(response)
Update state with latest info from Wink API.
Below is the the instruction that describes the task: ### Input: Update state with latest info from Wink API. ### Response: def update_state(self): """ Update state with latest info from Wink API. """ response = self.api_interface.get_device_state(self, id_override=self.parent_id(), type_override=self.parent_object_type()) self._update_state_from_response(response)
def set_main(key, value, path=MAIN_CF): ''' Set a single config value in the main.cf file. If the value does not already exist, it will be appended to the end. CLI Example: salt <minion> postfix.set_main mailq_path /usr/bin/mailq ''' pairs, conf_list = _parse_main(path) new_conf = [] key_line_match = re.compile("^{0}([\\s=]|$)".format(re.escape(key))) if key in pairs: for line in conf_list: if re.match(key_line_match, line): new_conf.append('{0} = {1}'.format(key, value)) else: new_conf.append(line) else: conf_list.append('{0} = {1}'.format(key, value)) new_conf = conf_list _write_conf(new_conf, path) return new_conf
Set a single config value in the main.cf file. If the value does not already exist, it will be appended to the end. CLI Example: salt <minion> postfix.set_main mailq_path /usr/bin/mailq
Below is the the instruction that describes the task: ### Input: Set a single config value in the main.cf file. If the value does not already exist, it will be appended to the end. CLI Example: salt <minion> postfix.set_main mailq_path /usr/bin/mailq ### Response: def set_main(key, value, path=MAIN_CF): ''' Set a single config value in the main.cf file. If the value does not already exist, it will be appended to the end. CLI Example: salt <minion> postfix.set_main mailq_path /usr/bin/mailq ''' pairs, conf_list = _parse_main(path) new_conf = [] key_line_match = re.compile("^{0}([\\s=]|$)".format(re.escape(key))) if key in pairs: for line in conf_list: if re.match(key_line_match, line): new_conf.append('{0} = {1}'.format(key, value)) else: new_conf.append(line) else: conf_list.append('{0} = {1}'.format(key, value)) new_conf = conf_list _write_conf(new_conf, path) return new_conf
def update(self): """ Get virtual size of current process by reading the process' stat file. This should work for Linux. """ try: stat = open('/proc/self/stat') status = open('/proc/self/status') except IOError: # pragma: no cover return False else: stats = stat.read().split() self.vsz = int( stats[22] ) self.rss = int( stats[23] ) * self.pagesize self.pagefaults = int( stats[11] ) for entry in status.readlines(): key, value = entry.split(':') size_in_bytes = lambda x: int(x.split()[0]) * 1024 if key == 'VmData': self.data_segment = size_in_bytes(value) elif key == 'VmExe': self.code_segment = size_in_bytes(value) elif key == 'VmLib': self.shared_segment = size_in_bytes(value) elif key == 'VmStk': self.stack_segment = size_in_bytes(value) key = self.key_map.get(key) if key: self.os_specific.append((key, value.strip())) stat.close() status.close() return True
Get virtual size of current process by reading the process' stat file. This should work for Linux.
Below is the the instruction that describes the task: ### Input: Get virtual size of current process by reading the process' stat file. This should work for Linux. ### Response: def update(self): """ Get virtual size of current process by reading the process' stat file. This should work for Linux. """ try: stat = open('/proc/self/stat') status = open('/proc/self/status') except IOError: # pragma: no cover return False else: stats = stat.read().split() self.vsz = int( stats[22] ) self.rss = int( stats[23] ) * self.pagesize self.pagefaults = int( stats[11] ) for entry in status.readlines(): key, value = entry.split(':') size_in_bytes = lambda x: int(x.split()[0]) * 1024 if key == 'VmData': self.data_segment = size_in_bytes(value) elif key == 'VmExe': self.code_segment = size_in_bytes(value) elif key == 'VmLib': self.shared_segment = size_in_bytes(value) elif key == 'VmStk': self.stack_segment = size_in_bytes(value) key = self.key_map.get(key) if key: self.os_specific.append((key, value.strip())) stat.close() status.close() return True
def plot_fit(self): """ Add the fit to the plot. """ self.plt.plot(*self.fit.fit, **self.options['fit'])
Add the fit to the plot.
Below is the the instruction that describes the task: ### Input: Add the fit to the plot. ### Response: def plot_fit(self): """ Add the fit to the plot. """ self.plt.plot(*self.fit.fit, **self.options['fit'])
async def recv_multipart(self): """ Read from all the associated sockets. :returns: A list of tuples (socket, frames) for each socket that returned a result. """ if not self._sockets: return [] results = [] async def recv_and_store(socket): frames = await socket.recv_multipart() results.append((socket, frames)) tasks = [ asyncio.ensure_future(recv_and_store(socket), loop=self.loop) for socket in self._sockets ] try: await asyncio.wait( tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop, ) finally: for task in tasks: task.cancel() return results
Read from all the associated sockets. :returns: A list of tuples (socket, frames) for each socket that returned a result.
Below is the the instruction that describes the task: ### Input: Read from all the associated sockets. :returns: A list of tuples (socket, frames) for each socket that returned a result. ### Response: async def recv_multipart(self): """ Read from all the associated sockets. :returns: A list of tuples (socket, frames) for each socket that returned a result. """ if not self._sockets: return [] results = [] async def recv_and_store(socket): frames = await socket.recv_multipart() results.append((socket, frames)) tasks = [ asyncio.ensure_future(recv_and_store(socket), loop=self.loop) for socket in self._sockets ] try: await asyncio.wait( tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop, ) finally: for task in tasks: task.cancel() return results
def _to_kraus(rep, data, input_dim, output_dim): """Transform a QuantumChannel to the Kraus representation.""" if rep == 'Kraus': return data if rep == 'Stinespring': return _stinespring_to_kraus(data, input_dim, output_dim) if rep == 'Operator': return _from_operator('Kraus', data, input_dim, output_dim) # Convert via Choi and Kraus if rep != 'Choi': data = _to_choi(rep, data, input_dim, output_dim) return _choi_to_kraus(data, input_dim, output_dim)
Transform a QuantumChannel to the Kraus representation.
Below is the the instruction that describes the task: ### Input: Transform a QuantumChannel to the Kraus representation. ### Response: def _to_kraus(rep, data, input_dim, output_dim): """Transform a QuantumChannel to the Kraus representation.""" if rep == 'Kraus': return data if rep == 'Stinespring': return _stinespring_to_kraus(data, input_dim, output_dim) if rep == 'Operator': return _from_operator('Kraus', data, input_dim, output_dim) # Convert via Choi and Kraus if rep != 'Choi': data = _to_choi(rep, data, input_dim, output_dim) return _choi_to_kraus(data, input_dim, output_dim)
def get_class_field(cls, field_name): """ Return the field object with the given name (for the class, the fields are in the "_redis_attr_%s" form) """ if not cls.has_field(field_name): raise AttributeError('"%s" is not a field for the model "%s"' % (field_name, cls.__name__)) field = getattr(cls, '_redis_attr_%s' % field_name) return field
Return the field object with the given name (for the class, the fields are in the "_redis_attr_%s" form)
Below is the the instruction that describes the task: ### Input: Return the field object with the given name (for the class, the fields are in the "_redis_attr_%s" form) ### Response: def get_class_field(cls, field_name): """ Return the field object with the given name (for the class, the fields are in the "_redis_attr_%s" form) """ if not cls.has_field(field_name): raise AttributeError('"%s" is not a field for the model "%s"' % (field_name, cls.__name__)) field = getattr(cls, '_redis_attr_%s' % field_name) return field
def get_reference_lines(docbody, ref_sect_start_line, ref_sect_end_line, ref_sect_title, ref_line_marker_ptn, title_marker_same_line): """After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. @param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document. """ start_idx = ref_sect_start_line if title_marker_same_line: # Title on same line as 1st ref- take title out! 
title_start = docbody[start_idx].find(ref_sect_title) if title_start != -1: # Set the first line with no title docbody[start_idx] = docbody[start_idx][title_start + len(ref_sect_title):] elif ref_sect_title is not None: # Set the start of the reference section to be after the title line start_idx += 1 if ref_sect_end_line is not None: ref_lines = docbody[start_idx:ref_sect_end_line + 1] else: ref_lines = docbody[start_idx:] if ref_sect_title: ref_lines = strip_footer(ref_lines, ref_sect_title) # Now rebuild reference lines: # (Go through each raw reference line, and format them into a set # of properly ordered lines based on markers) return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. @param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document.
Below is the the instruction that describes the task: ### Input: After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. @param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document. ### Response: def get_reference_lines(docbody, ref_sect_start_line, ref_sect_end_line, ref_sect_title, ref_line_marker_ptn, title_marker_same_line): """After the reference section of a document has been identified, and the first and last lines of the reference section have been recorded, this function is called to take the reference lines out of the document body. The document's reference lines are returned in a list of strings whereby each string is a reference line. Before this can be done however, the reference section is passed to another function that rebuilds any broken reference lines. @param docbody: (list) of strings - the entire document body. 
@param ref_sect_start_line: (integer) - the index in docbody of the first reference line. @param ref_sect_end_line: (integer) - the index in docbody of the last reference line. @param ref_sect_title: (string) - the title of the reference section (e.g. "References"). @param ref_line_marker_ptn: (string) - the patern used to match the marker for each reference line (e.g., could be used to match lines with markers of the form [1], [2], etc.) @param title_marker_same_line: (integer) - a flag to indicate whether or not the reference section title was on the same line as the first reference line's marker. @return: (list) of strings. Each string is a reference line, extracted from the document. """ start_idx = ref_sect_start_line if title_marker_same_line: # Title on same line as 1st ref- take title out! title_start = docbody[start_idx].find(ref_sect_title) if title_start != -1: # Set the first line with no title docbody[start_idx] = docbody[start_idx][title_start + len(ref_sect_title):] elif ref_sect_title is not None: # Set the start of the reference section to be after the title line start_idx += 1 if ref_sect_end_line is not None: ref_lines = docbody[start_idx:ref_sect_end_line + 1] else: ref_lines = docbody[start_idx:] if ref_sect_title: ref_lines = strip_footer(ref_lines, ref_sect_title) # Now rebuild reference lines: # (Go through each raw reference line, and format them into a set # of properly ordered lines based on markers) return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
def _set_ra_dns_server(self, v, load=False): """ Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_dns_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_dns_server() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ra_dns_server must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, 
u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)""", }) self.__ra_dns_server = t if hasattr(self, '_set'): self._set()
Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_dns_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_dns_server() directly.
Below is the the instruction that describes the task: ### Input: Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_dns_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_dns_server() directly. ### Response: def _set_ra_dns_server(self, v, load=False): """ Setter method for ra_dns_server, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/ra_dns_server (list) If this variable is read-only (config: false) in the source YANG file, then _set_ra_dns_server is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ra_dns_server() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ra_dns_server must 
be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("dns_server_prefix",ra_dns_server.ra_dns_server, yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dns-server-prefix', extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}), is_container='list', yang_name="ra-dns-server", rest_name="ra-dns-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set DNS server option', u'cli-suppress-list-no': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'IpV6NdRaDnsServerVlanIntf'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='list', is_config=True)""", }) self.__ra_dns_server = t if hasattr(self, '_set'): self._set()
def obo(self): """str: the ontology serialized in obo format. """ meta = self._obo_meta() meta = [meta] if meta else [] newline = "\n\n" if six.PY3 else "\n\n".encode('utf-8') try: # if 'namespace' in self.meta: return newline.join( meta + [ r.obo for r in self.typedefs ] + [ t.obo for t in self if t.id.startswith(self.meta['namespace'][0]) ]) except KeyError: return newline.join( meta + [ r.obo for r in self.typedefs ] + [ t.obo for t in self ])
str: the ontology serialized in obo format.
Below is the the instruction that describes the task: ### Input: str: the ontology serialized in obo format. ### Response: def obo(self): """str: the ontology serialized in obo format. """ meta = self._obo_meta() meta = [meta] if meta else [] newline = "\n\n" if six.PY3 else "\n\n".encode('utf-8') try: # if 'namespace' in self.meta: return newline.join( meta + [ r.obo for r in self.typedefs ] + [ t.obo for t in self if t.id.startswith(self.meta['namespace'][0]) ]) except KeyError: return newline.join( meta + [ r.obo for r in self.typedefs ] + [ t.obo for t in self ])
def set_const(const, val): '''Convenience wrapper to reliably set the value of a constant from outside of package scope''' try: cur = getattr(_c, const) except AttributeError: raise FSQEnvError(errno.ENOENT, u'no such constant:'\ u' {0}'.format(const)) except TypeError: raise TypeError(errno.EINVAL, u'const name must be a string or'\ u' unicode object, not:'\ u' {0}'.format(const.__class__.__name__)) should_be = cur.__class__ try: if not isinstance(val, should_be): if should_be is unicode or cur is None: val = coerce_unicode(val, _c.FSQ_CHARSET) elif should_be is int and const.endswith('MODE'): val = int(val, 8) elif isinstance(cur, numbers.Integral): val = int(val) else: should_be(val) except (TypeError, ValueError, ): raise FSQEnvError(errno.EINVAL, u'invalid type for constant {0},'\ u' should be {1}, not:'\ u' {2}'.format(const, should_be.__name__, val.__class__.__name__)) setattr(_c, const, val) return val
Convenience wrapper to reliably set the value of a constant from outside of package scope
Below is the the instruction that describes the task: ### Input: Convenience wrapper to reliably set the value of a constant from outside of package scope ### Response: def set_const(const, val): '''Convenience wrapper to reliably set the value of a constant from outside of package scope''' try: cur = getattr(_c, const) except AttributeError: raise FSQEnvError(errno.ENOENT, u'no such constant:'\ u' {0}'.format(const)) except TypeError: raise TypeError(errno.EINVAL, u'const name must be a string or'\ u' unicode object, not:'\ u' {0}'.format(const.__class__.__name__)) should_be = cur.__class__ try: if not isinstance(val, should_be): if should_be is unicode or cur is None: val = coerce_unicode(val, _c.FSQ_CHARSET) elif should_be is int and const.endswith('MODE'): val = int(val, 8) elif isinstance(cur, numbers.Integral): val = int(val) else: should_be(val) except (TypeError, ValueError, ): raise FSQEnvError(errno.EINVAL, u'invalid type for constant {0},'\ u' should be {1}, not:'\ u' {2}'.format(const, should_be.__name__, val.__class__.__name__)) setattr(_c, const, val) return val
def context_factory(apply_globally=False, api=None): """A decorator that registers a single hug context factory""" def decorator(context_factory_): if apply_globally: hug.defaults.context_factory = context_factory_ else: apply_to_api = hug.API(api) if api else hug.api.from_object(context_factory_) apply_to_api.context_factory = context_factory_ return context_factory_ return decorator
A decorator that registers a single hug context factory
Below is the the instruction that describes the task: ### Input: A decorator that registers a single hug context factory ### Response: def context_factory(apply_globally=False, api=None): """A decorator that registers a single hug context factory""" def decorator(context_factory_): if apply_globally: hug.defaults.context_factory = context_factory_ else: apply_to_api = hug.API(api) if api else hug.api.from_object(context_factory_) apply_to_api.context_factory = context_factory_ return context_factory_ return decorator
def fetch_token(self): """Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. """ grant_type = 'client_credentials' channel = yield self._tvm.ticket_full( self._client_id, self._client_secret, grant_type, {}) ticket = yield channel.rx.get() raise gen.Return(self._make_token(ticket))
Gains token from secure backend service. :return: Token formatted for Cocaine protocol header.
Below is the the instruction that describes the task: ### Input: Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. ### Response: def fetch_token(self): """Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. """ grant_type = 'client_credentials' channel = yield self._tvm.ticket_full( self._client_id, self._client_secret, grant_type, {}) ticket = yield channel.rx.get() raise gen.Return(self._make_token(ticket))
def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0): """Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`. """ dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12] return ((x + int(dx)) % w), ((y + int(dy)) % h)
Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`.
Below is the the instruction that describes the task: ### Input: Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`. ### Response: def spinn5_local_eth_coord(x, y, w, h, root_x=0, root_y=0): """Get the coordinates of a chip's local ethernet connected chip. Returns the coordinates of the ethernet connected chip on the same board as the supplied chip. .. note:: This function assumes the system is constructed from SpiNN-5 boards .. warning:: In general, applications should interrogate the machine to determine which Ethernet connected chip is considered 'local' to a particular SpiNNaker chip, e.g. 
using :py:class:`rig.machine_control.MachineController.get_system_info`:: >> from rig.machine_control import MachineController >> mc = MachineController("my-machine") >> si = mc.get_system_info() >> print(si[(3, 2)].local_ethernet_chip) (0, 0) :py:func:`.spinn5_local_eth_coord` will always produce the coordinates of the Ethernet-connected SpiNNaker chip on the same SpiNN-5 board as the supplied chip. In future versions of the low-level system software, some other method of choosing local Ethernet connected chips may be used. Parameters ---------- x, y : int Chip whose coordinates are of interest. w, h : int Width and height of the system in chips. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`. """ dx, dy = SPINN5_ETH_OFFSET[(y - root_y) % 12][(x - root_x) % 12] return ((x + int(dx)) % w), ((y + int(dy)) % h)
def _writepydoc(doc, *args): """create pydoc html pages doc -- destination directory for documents *args -- modules run thru pydoc """ ok = True if not os.path.isdir(doc): os.makedirs(doc) if os.path.curdir not in sys.path: sys.path.append(os.path.curdir) for f in args: if f.startswith('./'): f = f[2:] name = os.path.sep.join(f.strip('.py').split(os.path.sep)) try: e = __import__(name) except Exception,ex: raise # _writebrokedoc(doc, ex, name) # continue if name.endswith('_client'): _writeclientdoc(doc, e) continue if name.endswith('_types'): _writetypesdoc(doc, e) continue try: _writedoc(doc, e) except IndexError,ex: _writebrokedoc(doc, ex, name) continue
create pydoc html pages doc -- destination directory for documents *args -- modules run thru pydoc
Below is the the instruction that describes the task: ### Input: create pydoc html pages doc -- destination directory for documents *args -- modules run thru pydoc ### Response: def _writepydoc(doc, *args): """create pydoc html pages doc -- destination directory for documents *args -- modules run thru pydoc """ ok = True if not os.path.isdir(doc): os.makedirs(doc) if os.path.curdir not in sys.path: sys.path.append(os.path.curdir) for f in args: if f.startswith('./'): f = f[2:] name = os.path.sep.join(f.strip('.py').split(os.path.sep)) try: e = __import__(name) except Exception,ex: raise # _writebrokedoc(doc, ex, name) # continue if name.endswith('_client'): _writeclientdoc(doc, e) continue if name.endswith('_types'): _writetypesdoc(doc, e) continue try: _writedoc(doc, e) except IndexError,ex: _writebrokedoc(doc, ex, name) continue
def get_context(self, arr, expr, context): """ Returns a context dictionary for use in evaluating the expression. :param arr: The input array. :param expr: The input expression. :param context: Evaluation context. """ expression_names = [x for x in self.get_expression_names(expr) if x not in set(context.keys()).union(['i'])] if len(expression_names) != 1: raise ValueError('The expression must have exactly one variable.') return {expression_names[0]: arr}
Returns a context dictionary for use in evaluating the expression. :param arr: The input array. :param expr: The input expression. :param context: Evaluation context.
Below is the the instruction that describes the task: ### Input: Returns a context dictionary for use in evaluating the expression. :param arr: The input array. :param expr: The input expression. :param context: Evaluation context. ### Response: def get_context(self, arr, expr, context): """ Returns a context dictionary for use in evaluating the expression. :param arr: The input array. :param expr: The input expression. :param context: Evaluation context. """ expression_names = [x for x in self.get_expression_names(expr) if x not in set(context.keys()).union(['i'])] if len(expression_names) != 1: raise ValueError('The expression must have exactly one variable.') return {expression_names[0]: arr}
def assign_aromatic(mol): """Assign aromatic ring sp2 atom: pi=1 -> +1 N, O, S, C- -> +2 >C=O, B, C+ -> 0 sp3 atom -> not aromatic sum of the score satisfies 4n+2 -> aromatic """ mol.require("Valence") mol.require("MinifiedRing") for ring in mol.rings: pi_cnt = 0 for r in ring: if mol.atom(r).pi == 0: if mol.atom(r).symbol == "C": if mol.atom(r).charge == 1: pass elif mol.atom(r).charge == -1: pi_cnt += 2 else: break elif mol.atom(r).charge == 0: if mol.atom(r).symbol in ("N", "O", "S"): pi_cnt += 2 elif mol.atom(r).symbol == "B": pass else: break else: break elif mol.atom(r).pi == 1: if mol.atom(r).carbonyl_C: pass else: pi_cnt += 1 else: break else: if pi_cnt % 4 == 2: for u, v in iterator.consecutive(ring + [ring[0]], 2): mol.atom(u).aromatic = True mol.bond(u, v).aromatic = True mol.descriptors.add("Aromatic")
Assign aromatic ring sp2 atom: pi=1 -> +1 N, O, S, C- -> +2 >C=O, B, C+ -> 0 sp3 atom -> not aromatic sum of the score satisfies 4n+2 -> aromatic
Below is the the instruction that describes the task: ### Input: Assign aromatic ring sp2 atom: pi=1 -> +1 N, O, S, C- -> +2 >C=O, B, C+ -> 0 sp3 atom -> not aromatic sum of the score satisfies 4n+2 -> aromatic ### Response: def assign_aromatic(mol): """Assign aromatic ring sp2 atom: pi=1 -> +1 N, O, S, C- -> +2 >C=O, B, C+ -> 0 sp3 atom -> not aromatic sum of the score satisfies 4n+2 -> aromatic """ mol.require("Valence") mol.require("MinifiedRing") for ring in mol.rings: pi_cnt = 0 for r in ring: if mol.atom(r).pi == 0: if mol.atom(r).symbol == "C": if mol.atom(r).charge == 1: pass elif mol.atom(r).charge == -1: pi_cnt += 2 else: break elif mol.atom(r).charge == 0: if mol.atom(r).symbol in ("N", "O", "S"): pi_cnt += 2 elif mol.atom(r).symbol == "B": pass else: break else: break elif mol.atom(r).pi == 1: if mol.atom(r).carbonyl_C: pass else: pi_cnt += 1 else: break else: if pi_cnt % 4 == 2: for u, v in iterator.consecutive(ring + [ring[0]], 2): mol.atom(u).aromatic = True mol.bond(u, v).aromatic = True mol.descriptors.add("Aromatic")
def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0): """Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES """ self.user_id = user_id self.password = password if self.user_id == None or self.password == None: print "[*] Error __init__: user_id and password is needed" return False try: cookie = naver_login(user_id, password) except: return False self.session.cookies.set('NID_AUT', cookie["NID_AUT"]) self.session.cookies.set('NID_SES', cookie["NID_SES"]) s = self.getRegisterUserInfo(svctype, auth) if s is True: return True else: print "[*] Error getRegisterUserInfo: failed" return False
Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES
Below is the the instruction that describes the task: ### Input: Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES ### Response: def login(self, user_id, password, svctype = "Android NDrive App ver", auth = 0): """Log in Naver and get cookie Agrs: user_id: Naver account's login id password: Naver account's login password Returns: True: Login success False: Login failed Remarks: self.cookie is a dictionary with 5 keys: path, domain, NID_AUT, nid_inf, NID_SES """ self.user_id = user_id self.password = password if self.user_id == None or self.password == None: print "[*] Error __init__: user_id and password is needed" return False try: cookie = naver_login(user_id, password) except: return False self.session.cookies.set('NID_AUT', cookie["NID_AUT"]) self.session.cookies.set('NID_SES', cookie["NID_SES"]) s = self.getRegisterUserInfo(svctype, auth) if s is True: return True else: print "[*] Error getRegisterUserInfo: failed" return False
def get_image_attribute(self, image_id, attribute='launchPermission'): """ Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested """ params = {'ImageId' : image_id, 'Attribute' : attribute} return self.get_object('DescribeImageAttribute', params, ImageAttribute, verb='POST')
Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested
Below is the the instruction that describes the task: ### Input: Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested ### Response: def get_image_attribute(self, image_id, attribute='launchPermission'): """ Gets an attribute from an image. :type image_id: string :param image_id: The Amazon image id for which you want info about :type attribute: string :param attribute: The attribute you need information about. Valid choices are: * launchPermission * productCodes * blockDeviceMapping :rtype: :class:`boto.ec2.image.ImageAttribute` :return: An ImageAttribute object representing the value of the attribute requested """ params = {'ImageId' : image_id, 'Attribute' : attribute} return self.get_object('DescribeImageAttribute', params, ImageAttribute, verb='POST')
def _get_sorted_cond_keys(self, keys_list): """ This function returns only the elements starting with 'analysisservice-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element """ # The names can be found in reflexrulewidget.pt inside the # conditionscontainer div. cond_list = [] for key in keys_list: if key.startswith('analysisservice-'): cond_list.append(key) cond_list.sort() return cond_list
This function returns only the elements starting with 'analysisservice-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element
Below is the the instruction that describes the task: ### Input: This function returns only the elements starting with 'analysisservice-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element ### Response: def _get_sorted_cond_keys(self, keys_list): """ This function returns only the elements starting with 'analysisservice-' in 'keys_list'. The returned list is sorted by the index appended to the end of each element """ # The names can be found in reflexrulewidget.pt inside the # conditionscontainer div. cond_list = [] for key in keys_list: if key.startswith('analysisservice-'): cond_list.append(key) cond_list.sort() return cond_list
def get_slice(self, start_index=None, stop_index=None, as_list=False): """ For sorted Series will return either a Series or list of all of the rows where the index is greater than or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the start or stop index is None then will include from the first or last element, similar to standard python slide of [:5] or [:5]. Both end points are considered inclusive. :param start_index: lowest index value to include, or None to start from the first row :param stop_index: highest index value to include, or None to end at the last row :param as_list: if True then return a list of the indexes and values :return: Series or tuple of (index list, values list) """ if not self._sort: raise RuntimeError('Can only use get_slice on sorted Series') start_location = bisect_left(self._index, start_index) if start_index is not None else None stop_location = bisect_right(self._index, stop_index) if stop_index is not None else None index = self._index[start_location:stop_location] data = self._data[start_location:stop_location] if as_list: return index, data else: return Series(data=data, index=index, data_name=self._data_name, index_name=self._index_name, sort=self._sort)
For sorted Series will return either a Series or list of all of the rows where the index is greater than or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the start or stop index is None then will include from the first or last element, similar to standard python slide of [:5] or [:5]. Both end points are considered inclusive. :param start_index: lowest index value to include, or None to start from the first row :param stop_index: highest index value to include, or None to end at the last row :param as_list: if True then return a list of the indexes and values :return: Series or tuple of (index list, values list)
Below is the the instruction that describes the task: ### Input: For sorted Series will return either a Series or list of all of the rows where the index is greater than or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the start or stop index is None then will include from the first or last element, similar to standard python slide of [:5] or [:5]. Both end points are considered inclusive. :param start_index: lowest index value to include, or None to start from the first row :param stop_index: highest index value to include, or None to end at the last row :param as_list: if True then return a list of the indexes and values :return: Series or tuple of (index list, values list) ### Response: def get_slice(self, start_index=None, stop_index=None, as_list=False): """ For sorted Series will return either a Series or list of all of the rows where the index is greater than or equal to the start_index if provided and less than or equal to the stop_index if provided. If either the start or stop index is None then will include from the first or last element, similar to standard python slide of [:5] or [:5]. Both end points are considered inclusive. 
:param start_index: lowest index value to include, or None to start from the first row :param stop_index: highest index value to include, or None to end at the last row :param as_list: if True then return a list of the indexes and values :return: Series or tuple of (index list, values list) """ if not self._sort: raise RuntimeError('Can only use get_slice on sorted Series') start_location = bisect_left(self._index, start_index) if start_index is not None else None stop_location = bisect_right(self._index, stop_index) if stop_index is not None else None index = self._index[start_location:stop_location] data = self._data[start_location:stop_location] if as_list: return index, data else: return Series(data=data, index=index, data_name=self._data_name, index_name=self._index_name, sort=self._sort)
def _unary_op(name, doc="unary operator"): """ Create a method for given unary operator """ def _(self): jc = getattr(self._jc, name)() return Column(jc) _.__doc__ = doc return _
Create a method for given unary operator
Below is the the instruction that describes the task: ### Input: Create a method for given unary operator ### Response: def _unary_op(name, doc="unary operator"): """ Create a method for given unary operator """ def _(self): jc = getattr(self._jc, name)() return Column(jc) _.__doc__ = doc return _
def create_report(outdirname, report_filename, **kwargs): """Creates a LaTeX report. :param report_filename: the name of the file. :param outdirname: the name of the output directory. :type report_filename: str :type outdirname: str """ # Checking the required variables if "steps" in kwargs: assert "descriptions" in kwargs assert "long_descriptions" in kwargs assert "steps_filename" not in kwargs else: assert "steps_filename" in kwargs assert "descriptions" not in kwargs assert "long_descriptions" not in kwargs assert "summaries" in kwargs assert "background" in kwargs assert "project_name" in kwargs assert "summary_fn" in kwargs assert "report_title" in kwargs assert "report_author" in kwargs assert "initial_files" in kwargs assert "final_nb_markers" in kwargs assert "final_nb_samples" in kwargs assert "final_files" in kwargs assert "plink_version" in kwargs assert "graphic_paths_fn" in kwargs # Formatting the background section background_section = _format_background(kwargs["background"]) # Writing the method steps to a separate file (for access later) steps_filename = None if "steps_filename" in kwargs: steps_filename = kwargs["steps_filename"] else: steps_filename = os.path.join(outdirname, "steps_summary.tex") with open(steps_filename, "w") as o_file: zipped = zip(kwargs["steps"], kwargs["descriptions"], kwargs["long_descriptions"]) for step, desc, long_desc in zipped: if desc.endswith("."): desc = desc[:-1] step = step.replace("_", r"\_") to_print = latex.item(desc) to_print += " [{}].".format(latex.texttt(step)) if long_desc is not None: to_print += " " + long_desc print >>o_file, latex.wrap_lines(to_print) + "\n" # Adding the content of the results section result_summaries = [] for name in kwargs["summaries"]: full_path = os.path.abspath(name) if os.path.isfile(full_path): rel_path = os.path.relpath(full_path, outdirname) result_summaries.append(re.sub(r"\\", "/", rel_path)) # Reading the initial_files file initial_files = None with 
open(kwargs["initial_files"], "r") as i_file: initial_files = i_file.read().splitlines() # Reading the final_files file final_files = None with open(kwargs["final_files"], "r") as i_file: final_files = [i.split("\t")[0] for i in i_file.read().splitlines()] # Adding the bibliography content biblio_entry = latex.bib_entry( name="pyGenClean", authors="Lemieux Perreault LP, Provost S, Legault MA, Barhdadi A, " r"Dub\'e MP", title="pyGenClean: efficient tool for genetic data clean up before " "association testing", journal="Bioinformatics", year="2013", volume="29", number="13", pages="1704--1705", ) + "\n" * 2 + latex.bib_entry( name="plink", authors="Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, " "Bender D, Maller J, Sklar P, de Bakker PIW, Daly MJ, Sham PC", title="PLINK: a tool set for whole-genome association and " "population-based linkage analyses", journal="American Journal of Human Genetics", year="2007", volume="81", number="3", pages="559--575", ) + "\n" * 2 + latex.bib_entry( name="bafRegress", authors=r"Goo J, Matthew F, Kurt NH, Jane MR, Kimberly FD, " r"Gon{\c{c}}alo RA, Michael B, Hyun Min K", title="Detecting and estimating contamination of human DNA samples in " "sequencing and array-based genotype data", journal="The American Journal of Human Genetics", year="2012", volume="91", number="5", pages="839--848", ) # Getting the template main_template = latex.jinja2_env.get_template("main_document.tex") # Getting the data today = datetime.today() # Reading the graphics path graphic_paths = [] if kwargs["graphic_paths_fn"] is not None: with open(kwargs["graphic_paths_fn"], "r") as i_file: graphic_paths = [ re.sub(r"\\", "/", path) + ("" if path.endswith("/") else "/") for path in i_file.read().splitlines() ] try: with open(report_filename, "w") as i_file: # Rendering the template print >>i_file, main_template.render( project_name=latex.sanitize_tex(kwargs["project_name"]), month=today.strftime("%B"), day=today.day, year=today.year, 
background_content=background_section, result_summaries=result_summaries, bibliography_content=biblio_entry, pygenclean_version=pygenclean_version, plink_version=kwargs["plink_version"], steps_filename=os.path.basename(steps_filename), final_results=_create_summary_table( kwargs["summary_fn"], latex.jinja2_env.get_template("summary_table.tex"), nb_samples=kwargs["final_nb_samples"], nb_markers=kwargs["final_nb_markers"], ), report_title=latex.sanitize_tex(kwargs["report_title"]), report_author=latex.sanitize_tex(kwargs["report_author"]), initial_files=initial_files, final_files=final_files, final_nb_samples=kwargs["final_nb_samples"], final_nb_markers=kwargs["final_nb_markers"], graphic_paths=graphic_paths, ) except IOError: msg = "{}: could not create report".format(report_filename) raise ProgramError(msg)
Creates a LaTeX report. :param report_filename: the name of the file. :param outdirname: the name of the output directory. :type report_filename: str :type outdirname: str
Below is the the instruction that describes the task: ### Input: Creates a LaTeX report. :param report_filename: the name of the file. :param outdirname: the name of the output directory. :type report_filename: str :type outdirname: str ### Response: def create_report(outdirname, report_filename, **kwargs): """Creates a LaTeX report. :param report_filename: the name of the file. :param outdirname: the name of the output directory. :type report_filename: str :type outdirname: str """ # Checking the required variables if "steps" in kwargs: assert "descriptions" in kwargs assert "long_descriptions" in kwargs assert "steps_filename" not in kwargs else: assert "steps_filename" in kwargs assert "descriptions" not in kwargs assert "long_descriptions" not in kwargs assert "summaries" in kwargs assert "background" in kwargs assert "project_name" in kwargs assert "summary_fn" in kwargs assert "report_title" in kwargs assert "report_author" in kwargs assert "initial_files" in kwargs assert "final_nb_markers" in kwargs assert "final_nb_samples" in kwargs assert "final_files" in kwargs assert "plink_version" in kwargs assert "graphic_paths_fn" in kwargs # Formatting the background section background_section = _format_background(kwargs["background"]) # Writing the method steps to a separate file (for access later) steps_filename = None if "steps_filename" in kwargs: steps_filename = kwargs["steps_filename"] else: steps_filename = os.path.join(outdirname, "steps_summary.tex") with open(steps_filename, "w") as o_file: zipped = zip(kwargs["steps"], kwargs["descriptions"], kwargs["long_descriptions"]) for step, desc, long_desc in zipped: if desc.endswith("."): desc = desc[:-1] step = step.replace("_", r"\_") to_print = latex.item(desc) to_print += " [{}].".format(latex.texttt(step)) if long_desc is not None: to_print += " " + long_desc print >>o_file, latex.wrap_lines(to_print) + "\n" # Adding the content of the results section result_summaries = [] for name in kwargs["summaries"]: 
full_path = os.path.abspath(name) if os.path.isfile(full_path): rel_path = os.path.relpath(full_path, outdirname) result_summaries.append(re.sub(r"\\", "/", rel_path)) # Reading the initial_files file initial_files = None with open(kwargs["initial_files"], "r") as i_file: initial_files = i_file.read().splitlines() # Reading the final_files file final_files = None with open(kwargs["final_files"], "r") as i_file: final_files = [i.split("\t")[0] for i in i_file.read().splitlines()] # Adding the bibliography content biblio_entry = latex.bib_entry( name="pyGenClean", authors="Lemieux Perreault LP, Provost S, Legault MA, Barhdadi A, " r"Dub\'e MP", title="pyGenClean: efficient tool for genetic data clean up before " "association testing", journal="Bioinformatics", year="2013", volume="29", number="13", pages="1704--1705", ) + "\n" * 2 + latex.bib_entry( name="plink", authors="Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, " "Bender D, Maller J, Sklar P, de Bakker PIW, Daly MJ, Sham PC", title="PLINK: a tool set for whole-genome association and " "population-based linkage analyses", journal="American Journal of Human Genetics", year="2007", volume="81", number="3", pages="559--575", ) + "\n" * 2 + latex.bib_entry( name="bafRegress", authors=r"Goo J, Matthew F, Kurt NH, Jane MR, Kimberly FD, " r"Gon{\c{c}}alo RA, Michael B, Hyun Min K", title="Detecting and estimating contamination of human DNA samples in " "sequencing and array-based genotype data", journal="The American Journal of Human Genetics", year="2012", volume="91", number="5", pages="839--848", ) # Getting the template main_template = latex.jinja2_env.get_template("main_document.tex") # Getting the data today = datetime.today() # Reading the graphics path graphic_paths = [] if kwargs["graphic_paths_fn"] is not None: with open(kwargs["graphic_paths_fn"], "r") as i_file: graphic_paths = [ re.sub(r"\\", "/", path) + ("" if path.endswith("/") else "/") for path in i_file.read().splitlines() ] try: with 
open(report_filename, "w") as i_file: # Rendering the template print >>i_file, main_template.render( project_name=latex.sanitize_tex(kwargs["project_name"]), month=today.strftime("%B"), day=today.day, year=today.year, background_content=background_section, result_summaries=result_summaries, bibliography_content=biblio_entry, pygenclean_version=pygenclean_version, plink_version=kwargs["plink_version"], steps_filename=os.path.basename(steps_filename), final_results=_create_summary_table( kwargs["summary_fn"], latex.jinja2_env.get_template("summary_table.tex"), nb_samples=kwargs["final_nb_samples"], nb_markers=kwargs["final_nb_markers"], ), report_title=latex.sanitize_tex(kwargs["report_title"]), report_author=latex.sanitize_tex(kwargs["report_author"]), initial_files=initial_files, final_files=final_files, final_nb_samples=kwargs["final_nb_samples"], final_nb_markers=kwargs["final_nb_markers"], graphic_paths=graphic_paths, ) except IOError: msg = "{}: could not create report".format(report_filename) raise ProgramError(msg)
def namedb_namespace_fields_check( namespace_rec ): """ Given a namespace record, make sure the following fields are present: * namespace_id * buckets Makes the record suitable for insertion/update. NOTE: MODIFIES namespace_rec """ assert namespace_rec.has_key('namespace_id'), "BUG: namespace record has no ID" assert namespace_rec.has_key('buckets'), 'BUG: missing price buckets' assert isinstance(namespace_rec['buckets'], str), 'BUG: namespace data is not in canonical form' return namespace_rec
Given a namespace record, make sure the following fields are present: * namespace_id * buckets Makes the record suitable for insertion/update. NOTE: MODIFIES namespace_rec
Below is the the instruction that describes the task: ### Input: Given a namespace record, make sure the following fields are present: * namespace_id * buckets Makes the record suitable for insertion/update. NOTE: MODIFIES namespace_rec ### Response: def namedb_namespace_fields_check( namespace_rec ): """ Given a namespace record, make sure the following fields are present: * namespace_id * buckets Makes the record suitable for insertion/update. NOTE: MODIFIES namespace_rec """ assert namespace_rec.has_key('namespace_id'), "BUG: namespace record has no ID" assert namespace_rec.has_key('buckets'), 'BUG: missing price buckets' assert isinstance(namespace_rec['buckets'], str), 'BUG: namespace data is not in canonical form' return namespace_rec
def sms_recipients(self): """Returns a list of recipients subscribed to receive SMS's for this "notifications" class. See also: edc_auth.UserProfile. """ sms_recipients = [] UserProfile = django_apps.get_model("edc_auth.UserProfile") for user_profile in UserProfile.objects.filter( user__is_active=True, user__is_staff=True ): try: user_profile.sms_notifications.get(name=self.name) except ObjectDoesNotExist: pass else: if user_profile.mobile: sms_recipients.append(user_profile.mobile) return sms_recipients
Returns a list of recipients subscribed to receive SMS's for this "notifications" class. See also: edc_auth.UserProfile.
Below is the the instruction that describes the task: ### Input: Returns a list of recipients subscribed to receive SMS's for this "notifications" class. See also: edc_auth.UserProfile. ### Response: def sms_recipients(self): """Returns a list of recipients subscribed to receive SMS's for this "notifications" class. See also: edc_auth.UserProfile. """ sms_recipients = [] UserProfile = django_apps.get_model("edc_auth.UserProfile") for user_profile in UserProfile.objects.filter( user__is_active=True, user__is_staff=True ): try: user_profile.sms_notifications.get(name=self.name) except ObjectDoesNotExist: pass else: if user_profile.mobile: sms_recipients.append(user_profile.mobile) return sms_recipients
def _is_logged_in(self): """ Check whether or not the user is logged in. """ # if the user has not logged in in 24 hours, relogin if not self._http._has_session() or datetime.utcnow() >= self._lastlogin + timedelta(hours=24): return self._login() else: return {}
Check whether or not the user is logged in.
Below is the the instruction that describes the task: ### Input: Check whether or not the user is logged in. ### Response: def _is_logged_in(self): """ Check whether or not the user is logged in. """ # if the user has not logged in in 24 hours, relogin if not self._http._has_session() or datetime.utcnow() >= self._lastlogin + timedelta(hours=24): return self._login() else: return {}
def filter_ast(module_ast): """ Filters a given module ast, removing non-whitelisted nodes It allows only the following top level items: - imports - function definitions - class definitions - top level assignments where all the targets on the LHS are all caps """ def node_predicate(node): """ Return true if given node is whitelisted """ for an in ALLOWED_NODES: if isinstance(node, an): return True # Recurse through Assign node LHS targets when an id is not specified, # otherwise check that the id is uppercase if isinstance(node, ast.Assign): return all([node_predicate(t) for t in node.targets if not hasattr(t, 'id')]) \ and all([t.id.isupper() for t in node.targets if hasattr(t, 'id')]) return False module_ast.body = [n for n in module_ast.body if node_predicate(n)] return module_ast
Filters a given module ast, removing non-whitelisted nodes It allows only the following top level items: - imports - function definitions - class definitions - top level assignments where all the targets on the LHS are all caps
Below is the the instruction that describes the task: ### Input: Filters a given module ast, removing non-whitelisted nodes It allows only the following top level items: - imports - function definitions - class definitions - top level assignments where all the targets on the LHS are all caps ### Response: def filter_ast(module_ast): """ Filters a given module ast, removing non-whitelisted nodes It allows only the following top level items: - imports - function definitions - class definitions - top level assignments where all the targets on the LHS are all caps """ def node_predicate(node): """ Return true if given node is whitelisted """ for an in ALLOWED_NODES: if isinstance(node, an): return True # Recurse through Assign node LHS targets when an id is not specified, # otherwise check that the id is uppercase if isinstance(node, ast.Assign): return all([node_predicate(t) for t in node.targets if not hasattr(t, 'id')]) \ and all([t.id.isupper() for t in node.targets if hasattr(t, 'id')]) return False module_ast.body = [n for n in module_ast.body if node_predicate(n)] return module_ast
def starting_offset(source_code, offset): """Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function. """ word_finder = worder.Worder(source_code, True) expression, starting, starting_offset = \ word_finder.get_splitted_primary_before(offset) return starting_offset
Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function.
Below is the the instruction that describes the task: ### Input: Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function. ### Response: def starting_offset(source_code, offset): """Return the offset in which the completion should be inserted Usually code assist proposals should be inserted like:: completion = proposal.name result = (source_code[:starting_offset] + completion + source_code[offset:]) Where starting_offset is the offset returned by this function. """ word_finder = worder.Worder(source_code, True) expression, starting, starting_offset = \ word_finder.get_splitted_primary_before(offset) return starting_offset
def detect_events(self, max_attempts=3): """Returns a list of `Event`s detected from differences in state between the current snapshot and the Kindle Library. `books` and `progress` attributes will be set with the latest API results upon successful completion of the function. Returns: If failed to retrieve progress, None Else, the list of `Event`s """ # Attempt to retrieve current state from KindleAPI for _ in xrange(max_attempts): try: with KindleCloudReaderAPI\ .get_instance(self.uname, self.pword) as kcr: self.books = kcr.get_library_metadata() self.progress = kcr.get_library_progress() except KindleAPIError: continue else: break else: return None # Calculate diffs from new progress progress_map = {book.asin: self.progress[book.asin].locs[1] for book in self.books} new_events = self._snapshot.calc_update_events(progress_map) update_event = UpdateEvent(datetime.now().replace(microsecond=0)) new_events.append(update_event) self._event_buf.extend(new_events) return new_events
Returns a list of `Event`s detected from differences in state between the current snapshot and the Kindle Library. `books` and `progress` attributes will be set with the latest API results upon successful completion of the function. Returns: If failed to retrieve progress, None Else, the list of `Event`s
Below is the the instruction that describes the task: ### Input: Returns a list of `Event`s detected from differences in state between the current snapshot and the Kindle Library. `books` and `progress` attributes will be set with the latest API results upon successful completion of the function. Returns: If failed to retrieve progress, None Else, the list of `Event`s ### Response: def detect_events(self, max_attempts=3): """Returns a list of `Event`s detected from differences in state between the current snapshot and the Kindle Library. `books` and `progress` attributes will be set with the latest API results upon successful completion of the function. Returns: If failed to retrieve progress, None Else, the list of `Event`s """ # Attempt to retrieve current state from KindleAPI for _ in xrange(max_attempts): try: with KindleCloudReaderAPI\ .get_instance(self.uname, self.pword) as kcr: self.books = kcr.get_library_metadata() self.progress = kcr.get_library_progress() except KindleAPIError: continue else: break else: return None # Calculate diffs from new progress progress_map = {book.asin: self.progress[book.asin].locs[1] for book in self.books} new_events = self._snapshot.calc_update_events(progress_map) update_event = UpdateEvent(datetime.now().replace(microsecond=0)) new_events.append(update_event) self._event_buf.extend(new_events) return new_events
def data_filler_user_agent(self, number_of_rows, conn): '''creates and fills the table with user agent data ''' cursor = conn.cursor() cursor.execute(''' CREATE TABLE user_agent(id TEXT PRIMARY KEY, ip TEXT, countrycode TEXT, useragent TEXT) ''') conn.commit() multi_lines = [] try: for i in range(0, number_of_rows): multi_lines.append((rnd_id_generator(self), self.faker.ipv4(), self.faker.country_code(), self.faker.user_agent())) cursor.executemany('insert into user_agent values(?,?,?,?)', multi_lines) conn.commit() logger.warning('user_agent Commits are successful after write job!', extra=d) except Exception as e: logger.error(e, extra=d)
creates and fills the table with user agent data
Below is the the instruction that describes the task: ### Input: creates and fills the table with user agent data ### Response: def data_filler_user_agent(self, number_of_rows, conn): '''creates and fills the table with user agent data ''' cursor = conn.cursor() cursor.execute(''' CREATE TABLE user_agent(id TEXT PRIMARY KEY, ip TEXT, countrycode TEXT, useragent TEXT) ''') conn.commit() multi_lines = [] try: for i in range(0, number_of_rows): multi_lines.append((rnd_id_generator(self), self.faker.ipv4(), self.faker.country_code(), self.faker.user_agent())) cursor.executemany('insert into user_agent values(?,?,?,?)', multi_lines) conn.commit() logger.warning('user_agent Commits are successful after write job!', extra=d) except Exception as e: logger.error(e, extra=d)
def model_argmax(sess, x, predictions, samples, feed=None): """ Helper function that computes the current class prediction :param sess: TF session :param x: the input placeholder :param predictions: the model's symbolic output :param samples: numpy array with input samples (dims must match x) :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :return: the argmax output of predictions, i.e. the current predicted class """ feed_dict = {x: samples} if feed is not None: feed_dict.update(feed) probabilities = sess.run(predictions, feed_dict) if samples.shape[0] == 1: return np.argmax(probabilities) else: return np.argmax(probabilities, axis=1)
Helper function that computes the current class prediction :param sess: TF session :param x: the input placeholder :param predictions: the model's symbolic output :param samples: numpy array with input samples (dims must match x) :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :return: the argmax output of predictions, i.e. the current predicted class
Below is the the instruction that describes the task: ### Input: Helper function that computes the current class prediction :param sess: TF session :param x: the input placeholder :param predictions: the model's symbolic output :param samples: numpy array with input samples (dims must match x) :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :return: the argmax output of predictions, i.e. the current predicted class ### Response: def model_argmax(sess, x, predictions, samples, feed=None): """ Helper function that computes the current class prediction :param sess: TF session :param x: the input placeholder :param predictions: the model's symbolic output :param samples: numpy array with input samples (dims must match x) :param feed: An optional dictionary that is appended to the feeding dictionary before the session runs. Can be used to feed the learning phase of a Keras model for instance. :return: the argmax output of predictions, i.e. the current predicted class """ feed_dict = {x: samples} if feed is not None: feed_dict.update(feed) probabilities = sess.run(predictions, feed_dict) if samples.shape[0] == 1: return np.argmax(probabilities) else: return np.argmax(probabilities, axis=1)
def logmid_n(max_n, ratio=1/4.0, nsteps=15): """ Creates an array of integers that lie evenly spaced in the "middle" of the logarithmic scale from 0 to log(max_n). If max_n is very small and/or nsteps is very large, this may lead to duplicate values which will be removed from the output. This function has benefits in hurst_rs, because it cuts away both very small and very large n, which both can cause problems, and still produces a logarithmically spaced sequence. Args: max_n (int): largest possible output value (should be the sequence length when used in hurst_rs) Kwargs: ratio (float): width of the "middle" of the logarithmic interval relative to log(max_n). For example, for ratio=1/2.0 the logarithm of the resulting values will lie between 0.25 * log(max_n) and 0.75 * log(max_n). nsteps (float): (maximum) number of values to take from the specified range Returns: array of int: a logarithmically spaced sequence of at most nsteps values (may be less, because only unique values are returned) """ l = np.log(max_n) span = l * ratio start = l * (1 - ratio) * 0.5 midrange = start + 1.0*np.arange(nsteps)/nsteps*span nvals = np.round(np.exp(midrange)).astype("int32") return np.unique(nvals)
Creates an array of integers that lie evenly spaced in the "middle" of the logarithmic scale from 0 to log(max_n). If max_n is very small and/or nsteps is very large, this may lead to duplicate values which will be removed from the output. This function has benefits in hurst_rs, because it cuts away both very small and very large n, which both can cause problems, and still produces a logarithmically spaced sequence. Args: max_n (int): largest possible output value (should be the sequence length when used in hurst_rs) Kwargs: ratio (float): width of the "middle" of the logarithmic interval relative to log(max_n). For example, for ratio=1/2.0 the logarithm of the resulting values will lie between 0.25 * log(max_n) and 0.75 * log(max_n). nsteps (float): (maximum) number of values to take from the specified range Returns: array of int: a logarithmically spaced sequence of at most nsteps values (may be less, because only unique values are returned)
Below is the the instruction that describes the task: ### Input: Creates an array of integers that lie evenly spaced in the "middle" of the logarithmic scale from 0 to log(max_n). If max_n is very small and/or nsteps is very large, this may lead to duplicate values which will be removed from the output. This function has benefits in hurst_rs, because it cuts away both very small and very large n, which both can cause problems, and still produces a logarithmically spaced sequence. Args: max_n (int): largest possible output value (should be the sequence length when used in hurst_rs) Kwargs: ratio (float): width of the "middle" of the logarithmic interval relative to log(max_n). For example, for ratio=1/2.0 the logarithm of the resulting values will lie between 0.25 * log(max_n) and 0.75 * log(max_n). nsteps (float): (maximum) number of values to take from the specified range Returns: array of int: a logarithmically spaced sequence of at most nsteps values (may be less, because only unique values are returned) ### Response: def logmid_n(max_n, ratio=1/4.0, nsteps=15): """ Creates an array of integers that lie evenly spaced in the "middle" of the logarithmic scale from 0 to log(max_n). If max_n is very small and/or nsteps is very large, this may lead to duplicate values which will be removed from the output. This function has benefits in hurst_rs, because it cuts away both very small and very large n, which both can cause problems, and still produces a logarithmically spaced sequence. Args: max_n (int): largest possible output value (should be the sequence length when used in hurst_rs) Kwargs: ratio (float): width of the "middle" of the logarithmic interval relative to log(max_n). For example, for ratio=1/2.0 the logarithm of the resulting values will lie between 0.25 * log(max_n) and 0.75 * log(max_n). 
nsteps (float): (maximum) number of values to take from the specified range Returns: array of int: a logarithmically spaced sequence of at most nsteps values (may be less, because only unique values are returned) """ l = np.log(max_n) span = l * ratio start = l * (1 - ratio) * 0.5 midrange = start + 1.0*np.arange(nsteps)/nsteps*span nvals = np.round(np.exp(midrange)).astype("int32") return np.unique(nvals)
def any_positiveinteger_field(field, **kwargs): """ An positive integer >>> result = any_field(models.PositiveIntegerField()) >>> type(result) <type 'int'> >>> result > 0 True """ min_value = kwargs.get('min_value', 1) max_value = kwargs.get('max_value', 9999) return xunit.any_int(min_value=min_value, max_value=max_value)
An positive integer >>> result = any_field(models.PositiveIntegerField()) >>> type(result) <type 'int'> >>> result > 0 True
Below is the the instruction that describes the task: ### Input: An positive integer >>> result = any_field(models.PositiveIntegerField()) >>> type(result) <type 'int'> >>> result > 0 True ### Response: def any_positiveinteger_field(field, **kwargs): """ An positive integer >>> result = any_field(models.PositiveIntegerField()) >>> type(result) <type 'int'> >>> result > 0 True """ min_value = kwargs.get('min_value', 1) max_value = kwargs.get('max_value', 9999) return xunit.any_int(min_value=min_value, max_value=max_value)
def takeStereoScreenshot(self, pchPreviewFilename, pchVRFilename): """ Tells the compositor to take an internal screenshot of type VRScreenshotType_Stereo. It will take the current submitted scene textures of the running application and write them into the preview image and a side-by-side file for the VR image. This is similar to request screenshot, but doesn't ever talk to the application, just takes the shot and submits. """ fn = self.function_table.takeStereoScreenshot pOutScreenshotHandle = ScreenshotHandle_t() result = fn(byref(pOutScreenshotHandle), pchPreviewFilename, pchVRFilename) return result, pOutScreenshotHandle
Tells the compositor to take an internal screenshot of type VRScreenshotType_Stereo. It will take the current submitted scene textures of the running application and write them into the preview image and a side-by-side file for the VR image. This is similar to request screenshot, but doesn't ever talk to the application, just takes the shot and submits.
Below is the the instruction that describes the task: ### Input: Tells the compositor to take an internal screenshot of type VRScreenshotType_Stereo. It will take the current submitted scene textures of the running application and write them into the preview image and a side-by-side file for the VR image. This is similar to request screenshot, but doesn't ever talk to the application, just takes the shot and submits. ### Response: def takeStereoScreenshot(self, pchPreviewFilename, pchVRFilename): """ Tells the compositor to take an internal screenshot of type VRScreenshotType_Stereo. It will take the current submitted scene textures of the running application and write them into the preview image and a side-by-side file for the VR image. This is similar to request screenshot, but doesn't ever talk to the application, just takes the shot and submits. """ fn = self.function_table.takeStereoScreenshot pOutScreenshotHandle = ScreenshotHandle_t() result = fn(byref(pOutScreenshotHandle), pchPreviewFilename, pchVRFilename) return result, pOutScreenshotHandle
def concretize_read_addr(self, addr, strategies=None): """ Concretizes an address meant for reading. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses. """ if isinstance(addr, int): return [ addr ] elif not self.state.solver.symbolic(addr): return [ self.state.solver.eval(addr) ] strategies = self.read_strategies if strategies is None else strategies return self._apply_concretization_strategies(addr, strategies, 'load')
Concretizes an address meant for reading. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses.
Below is the the instruction that describes the task: ### Input: Concretizes an address meant for reading. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses. ### Response: def concretize_read_addr(self, addr, strategies=None): """ Concretizes an address meant for reading. :param addr: An expression for the address. :param strategies: A list of concretization strategies (to override the default). :returns: A list of concrete addresses. """ if isinstance(addr, int): return [ addr ] elif not self.state.solver.symbolic(addr): return [ self.state.solver.eval(addr) ] strategies = self.read_strategies if strategies is None else strategies return self._apply_concretization_strategies(addr, strategies, 'load')
def get_method(self, name): """ Get registered method callend `name`. """ try: return self.funcs[name] except KeyError: try: return self.instance._get_method(name) except AttributeError: return SimpleXMLRPCServer.resolve_dotted_attribute( self.instance, name, self.allow_dotted_names)
Get registered method callend `name`.
Below is the the instruction that describes the task: ### Input: Get registered method callend `name`. ### Response: def get_method(self, name): """ Get registered method callend `name`. """ try: return self.funcs[name] except KeyError: try: return self.instance._get_method(name) except AttributeError: return SimpleXMLRPCServer.resolve_dotted_attribute( self.instance, name, self.allow_dotted_names)
def get_logger(name): """Get a logging instance we can use.""" import logging import sys logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) shandler = logging.StreamHandler(sys.stdout) fmt = "" fmt += '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():' fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s' fmtr = logging.Formatter(fmt) shandler.setFormatter(fmtr) logger.addHandler(shandler) return logger
Get a logging instance we can use.
Below is the the instruction that describes the task: ### Input: Get a logging instance we can use. ### Response: def get_logger(name): """Get a logging instance we can use.""" import logging import sys logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) shandler = logging.StreamHandler(sys.stdout) fmt = "" fmt += '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():' fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s' fmtr = logging.Formatter(fmt) shandler.setFormatter(fmtr) logger.addHandler(shandler) return logger
def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): """Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary. """ if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): return if uf["location"] in fileindex: uf["location"] = fileindex[uf["location"]] return if not uf["location"] and uf["path"]: uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"]) if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]): if skip_broken: return else: raise cwltool.errors.WorkflowException( "File is missing: %s" % uf["location"]) uf["location"] = write_file( uploadfunc, fileindex, existing, uf["location"])
Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary.
Below is the the instruction that describes the task: ### Input: Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary. ### Response: def uploadFile(uploadfunc, fileindex, existing, uf, skip_broken=False): """Update a file object so that the location is a reference to the toil file store, writing it to the file store if necessary. """ if uf["location"].startswith("toilfs:") or uf["location"].startswith("_:"): return if uf["location"] in fileindex: uf["location"] = fileindex[uf["location"]] return if not uf["location"] and uf["path"]: uf["location"] = schema_salad.ref_resolver.file_uri(uf["path"]) if uf["location"].startswith("file://") and not os.path.isfile(uf["location"][7:]): if skip_broken: return else: raise cwltool.errors.WorkflowException( "File is missing: %s" % uf["location"]) uf["location"] = write_file( uploadfunc, fileindex, existing, uf["location"])
def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None): """ Add an entry to the system crontab. """ raise NotImplementedError
Add an entry to the system crontab.
Below is the the instruction that describes the task: ### Input: Add an entry to the system crontab. ### Response: def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None): """ Add an entry to the system crontab. """ raise NotImplementedError