code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def update(self, aspect_ratio=None, fov=None, near=None, far=None):
    """
    Update the internal projection matrix based on current values
    or values passed in if specified.

    :param aspect_ratio: New aspect ratio
    :param fov: New field of view
    :param near: New near value
    :param far: New far value
    """
    # Fix: the previous ``self.x = x or self.x`` pattern silently ignored
    # legitimate falsy values (e.g. fov=0 or near=0 would be discarded).
    # Explicit ``is not None`` checks make 0 a valid update value.
    if aspect_ratio is not None:
        self.aspect_ratio = aspect_ratio
    if fov is not None:
        self.fov = fov
    if near is not None:
        self.near = near
    if far is not None:
        self.far = far
    # Rebuild the cached perspective matrix from the (possibly updated) values.
    self.matrix = Matrix44.perspective_projection(
        self.fov, self.aspect_ratio, self.near, self.far)
Update the internal projection matrix based on current values or values passed in if specified. :param aspect_ratio: New aspect ratio :param fov: New field of view :param near: New near value :param far: New far value
Below is the the instruction that describes the task: ### Input: Update the internal projection matrix based on current values or values passed in if specified. :param aspect_ratio: New aspect ratio :param fov: New field of view :param near: New near value :param far: New far value ### Response: def update(self, aspect_ratio=None, fov=None, near=None, far=None): """ Update the internal projection matrix based on current values or values passed in if specified. :param aspect_ratio: New aspect ratio :param fov: New field of view :param near: New near value :param far: New far value """ self.aspect_ratio = aspect_ratio or self.aspect_ratio self.fov = fov or self.fov self.near = near or self.near self.far = far or self.far self.matrix = Matrix44.perspective_projection(self.fov, self.aspect_ratio, self.near, self.far)
def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None):
    """
    num_datapoints = min(duration/resolution, limit)

    :param duration_seconds: Time duration (in seconds) for which datapoints
        should be returned
    :param resolution_seconds: Time interval (in seconds) between data points
    :param limit: Maximum number of datapoints to return
    """
    # Missing or negative duration yields no datapoints.
    if not duration_seconds or duration_seconds < 0:
        return 0
    # A missing or non-positive resolution makes the ratio undefined.
    if not resolution_seconds or resolution_seconds <= 0:
        return None

    datapoint_count = duration_seconds / resolution_seconds
    if limit:
        datapoint_count = min(int(limit), datapoint_count)

    # Round up so partial intervals still count as a datapoint.
    return int(math.ceil(datapoint_count))
num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return
Below is the the instruction that describes the task: ### Input: num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return ### Response: def parse(duration_seconds, resolution_seconds=Resolution.MAX_RESOLUTION, limit=None): """ num_datapoints = min(duration/resolution, limit) :param duration_seconds: Time duration (in seconds) for which datapoints should be returned :param resolution_seconds: Time interval (in seconds) between data points :param limit: Maximum number of datapoints to return """ if not duration_seconds or duration_seconds < 0: return 0 if not resolution_seconds or resolution_seconds <= 0: return None num_datapoints = duration_seconds / resolution_seconds if limit: num_datapoints = min(int(limit), num_datapoints) return int(math.ceil(num_datapoints))
def open(self, host, port=23):
    """Opens a telnet connection to the desired AttenuatorDevice and
    queries basic information.

    Args:
        host: A valid hostname (IP address or DNS-resolvable name) to an
            MC-DAT attenuator instrument.
        port: An optional port number (defaults to telnet default 23)
    """
    self._telnet_client.open(host, port)

    # The instrument answers "MN?" with something like
    # "MN=<model>-<max_freq>-<max_atten>"; strip the optional prefix.
    config_str = self._telnet_client.cmd("MN?")
    prefix = "MN="
    if config_str.startswith(prefix):
        config_str = config_str[len(prefix):]

    # Split into at most three fields and keep them keyed by meaning.
    fields = config_str.split("-", 2)
    self.properties = dict(zip(['model', 'max_freq', 'max_atten'], fields))
    self.max_atten = float(self.properties['max_atten'])
Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23)
Below is the the instruction that describes the task: ### Input: Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23) ### Response: def open(self, host, port=23): """Opens a telnet connection to the desired AttenuatorDevice and queries basic information. Args: host: A valid hostname (IP address or DNS-resolvable name) to an MC-DAT attenuator instrument. port: An optional port number (defaults to telnet default 23) """ self._telnet_client.open(host, port) config_str = self._telnet_client.cmd("MN?") if config_str.startswith("MN="): config_str = config_str[len("MN="):] self.properties = dict( zip(['model', 'max_freq', 'max_atten'], config_str.split("-", 2))) self.max_atten = float(self.properties['max_atten'])
def _binning(values, limits=(0,0), bin_num=10): ''' Bins data that falls between certain limits, if the limits are (0, 0) the minimum and maximum values are used. Returns a list of tuples where the first element of the tuple is the center of the bin and the second element of the tuple are the counts. ''' if limits == (0, 0): eps = 1.0/sys.maxint min_val, max_val = min(values) - eps, max(values) + eps else: min_val, max_val = limits # get bin size bin_size = (max_val - min_val)/float(bin_num) bins = [0] * (bin_num) # will ignore these outliers for now out_points = 0 for value in values: try: if (value - min_val) < 0: out_points += 1 else: index = int((value - min_val)/float(bin_size)) bins[index] += 1 except IndexError: out_points += 1 # make it ready for an x,y plot result = [] center = (bin_size/2) + min_val for i, y in enumerate(bins): x = center + bin_size * i result.append( (x,y) ) return result
Bins data that falls between certain limits, if the limits are (0, 0) the minimum and maximum values are used. Returns a list of tuples where the first element of the tuple is the center of the bin and the second element of the tuple are the counts.
Below is the the instruction that describes the task: ### Input: Bins data that falls between certain limits, if the limits are (0, 0) the minimum and maximum values are used. Returns a list of tuples where the first element of the tuple is the center of the bin and the second element of the tuple are the counts. ### Response: def _binning(values, limits=(0,0), bin_num=10): ''' Bins data that falls between certain limits, if the limits are (0, 0) the minimum and maximum values are used. Returns a list of tuples where the first element of the tuple is the center of the bin and the second element of the tuple are the counts. ''' if limits == (0, 0): eps = 1.0/sys.maxint min_val, max_val = min(values) - eps, max(values) + eps else: min_val, max_val = limits # get bin size bin_size = (max_val - min_val)/float(bin_num) bins = [0] * (bin_num) # will ignore these outliers for now out_points = 0 for value in values: try: if (value - min_val) < 0: out_points += 1 else: index = int((value - min_val)/float(bin_size)) bins[index] += 1 except IndexError: out_points += 1 # make it ready for an x,y plot result = [] center = (bin_size/2) + min_val for i, y in enumerate(bins): x = center + bin_size * i result.append( (x,y) ) return result
def _patch_hover(self, element, data):
    """
    Replace edge start and end hover data with label_index data.
    """
    # Only applies when hovering over edges and a hover tool handle exists.
    if not (self.inspection_policy == 'edges' and 'hover' in self.handles):
        return
    lidx = element.nodes.get_dimension(self.label_index)
    # Sanitize the first two key dimension names for use as column keys.
    src, tgt = [dimension_sanitizer(kd.name) for kd in element.kdims[:2]]
    # NOTE(review): 'start'/'end' appear to be renamed to the *_values
    # columns used by the patches glyph data source — confirm against the
    # data dict produced by the plotting backend.
    if src == 'start': src += '_values'
    if tgt == 'end': tgt += '_values'
    # Map node index values (dimension 2) to their label values (lidx).
    lookup = dict(zip(*(element.nodes.dimension_values(d) for d in (2, lidx))))
    src_vals = data['patches_1'][src]
    tgt_vals = data['patches_1'][tgt]
    # Substitute labels where known; fall back to the original value.
    data['patches_1'][src] = [lookup.get(v, v) for v in src_vals]
    data['patches_1'][tgt] = [lookup.get(v, v) for v in tgt_vals]
Replace edge start and end hover data with label_index data.
Below is the the instruction that describes the task: ### Input: Replace edge start and end hover data with label_index data. ### Response: def _patch_hover(self, element, data): """ Replace edge start and end hover data with label_index data. """ if not (self.inspection_policy == 'edges' and 'hover' in self.handles): return lidx = element.nodes.get_dimension(self.label_index) src, tgt = [dimension_sanitizer(kd.name) for kd in element.kdims[:2]] if src == 'start': src += '_values' if tgt == 'end': tgt += '_values' lookup = dict(zip(*(element.nodes.dimension_values(d) for d in (2, lidx)))) src_vals = data['patches_1'][src] tgt_vals = data['patches_1'][tgt] data['patches_1'][src] = [lookup.get(v, v) for v in src_vals] data['patches_1'][tgt] = [lookup.get(v, v) for v in tgt_vals]
def parse_gpr(str_expr):
    """parse gpr into AST

    Parameters
    ----------
    str_expr : string
        string with the gene reaction rule to parse

    Returns
    -------
    tuple
        elements ast_tree and gene_ids as a set
    """
    str_expr = str_expr.strip()
    # An empty rule parses to no tree and no genes.
    if not str_expr:
        return None, set()

    # Substitute characters that would be illegal in a Python expression.
    for char, escaped in replacements:
        if char in str_expr:
            str_expr = str_expr.replace(char, escaped)

    # Prefix-escape Python keywords and identifiers starting with a digit
    # so the rule is parseable as a Python expression.
    sanitized = keyword_re.sub("__cobra_escape__", str_expr)
    sanitized = number_start_re.sub("__cobra_escape__", sanitized)

    tree = ast_parse(sanitized, "<string>", "eval")
    cleaner = GPRCleaner()
    cleaner.visit(tree)
    eval_gpr(tree, set())  # ensure the rule can be evaluated
    return tree, cleaner.gene_set
parse gpr into AST Parameters ---------- str_expr : string string with the gene reaction rule to parse Returns ------- tuple elements ast_tree and gene_ids as a set
Below is the the instruction that describes the task: ### Input: parse gpr into AST Parameters ---------- str_expr : string string with the gene reaction rule to parse Returns ------- tuple elements ast_tree and gene_ids as a set ### Response: def parse_gpr(str_expr): """parse gpr into AST Parameters ---------- str_expr : string string with the gene reaction rule to parse Returns ------- tuple elements ast_tree and gene_ids as a set """ str_expr = str_expr.strip() if len(str_expr) == 0: return None, set() for char, escaped in replacements: if char in str_expr: str_expr = str_expr.replace(char, escaped) escaped_str = keyword_re.sub("__cobra_escape__", str_expr) escaped_str = number_start_re.sub("__cobra_escape__", escaped_str) tree = ast_parse(escaped_str, "<string>", "eval") cleaner = GPRCleaner() cleaner.visit(tree) eval_gpr(tree, set()) # ensure the rule can be evaluated return tree, cleaner.gene_set
def resource_list(cls):
    """ Get the possible list of resources (name, id). """
    vlans = cls.list()
    # Names first, then stringified ids, matching the original ordering.
    names = [vlan['name'] for vlan in vlans]
    ids = [str(vlan['id']) for vlan in vlans]
    return names + ids
Get the possible list of resources (name, id).
Below is the the instruction that describes the task: ### Input: Get the possible list of resources (name, id). ### Response: def resource_list(cls): """ Get the possible list of resources (name, id). """ items = cls.list() ret = [vlan['name'] for vlan in items] ret.extend([str(vlan['id']) for vlan in items]) return ret
def selectAll(self):
    """
    Selects the text within all the editors.
    """
    # Suspend editor event handling so the per-editor selections do not
    # fire change callbacks, then restore handling afterwards.
    self.blockEditorHandling(True)
    for current_editor in self.editors():
        current_editor.selectAll()
    self.blockEditorHandling(False)
Selects the text within all the editors.
Below is the the instruction that describes the task: ### Input: Selects the text within all the editors. ### Response: def selectAll(self): """ Selects the text within all the editors. """ self.blockEditorHandling(True) for editor in self.editors(): editor.selectAll() self.blockEditorHandling(False)
def list_experiment(args):
    '''Get experiment information'''
    # Load the experiment's saved configuration to find the REST server.
    nni_config = Config(get_config_filename(args))
    rest_port = nni_config.get_config('restServerPort')
    rest_pid = nni_config.get_config('restServerPid')
    # Bail out early if the recorded REST server process is gone.
    if not detect_process(rest_pid):
        print_error('Experiment is not running...')
        return
    running, _ = check_rest_server_quick(rest_port)
    if running:
        response = rest_get(experiment_url(rest_port), REST_TIME_OUT)
        if response and check_response(response):
            # Convert raw timestamps to readable dates before printing.
            content = convert_time_stamp_to_date(json.loads(response.text))
            print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
        else:
            print_error('List experiment failed...')
    else:
        print_error('Restful server is not running...')
Get experiment information
Below is the the instruction that describes the task: ### Input: Get experiment information ### Response: def list_experiment(args): '''Get experiment information''' nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config('restServerPort') rest_pid = nni_config.get_config('restServerPid') if not detect_process(rest_pid): print_error('Experiment is not running...') return running, _ = check_rest_server_quick(rest_port) if running: response = rest_get(experiment_url(rest_port), REST_TIME_OUT) if response and check_response(response): content = convert_time_stamp_to_date(json.loads(response.text)) print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':'))) else: print_error('List experiment failed...') else: print_error('Restful server is not running...')
def multihead_attention(query_antecedent,
                        memory_antecedent,
                        bias,
                        total_key_depth,
                        total_value_depth,
                        output_depth,
                        num_heads,
                        dropout_rate,
                        attention_type="dot_product",
                        max_relative_position=None,
                        heads_share_relative_embedding=False,
                        add_relative_to_values=False,
                        image_shapes=None,
                        block_length=128,
                        block_width=128,
                        q_filter_width=1,
                        kv_filter_width=1,
                        q_padding="VALID",
                        kv_padding="VALID",
                        cache=None,
                        gap_size=0,
                        num_memory_blocks=2,
                        name="multihead_attention",
                        save_weights_to=None,
                        make_image_summary=True,
                        dropout_broadcast_dims=None,
                        vars_3d=False,
                        layer_collection=None,
                        recurrent_memory=None,
                        chunk_number=None,
                        hard_attention_k=0,
                        max_area_width=1,
                        max_area_height=1,
                        memory_height=1,
                        area_key_mode="mean",
                        area_value_mode="sum",
                        training=True,
                        **kwargs):
  """Multihead scaled-dot-product attention with input/output transformations.

  Args:
    query_antecedent: a Tensor with shape [batch, length_q, channels]
    memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
    bias: bias Tensor (see attention_bias())
    total_key_depth: an integer
    total_value_depth: an integer
    output_depth: an integer
    num_heads: an integer dividing total_key_depth and total_value_depth
    dropout_rate: a floating point number
    attention_type: a string, either "dot_product", "dot_product_relative",
      "local_mask_right", "local_unmasked", "masked_dilated_1d",
      "unmasked_dilated_1d", graph, or any attention function with the
      signature (query, key, value, **kwargs)
    max_relative_position: Maximum distance between inputs to generate
      unique relation embeddings for. Only relevant when using
      "dot_product_relative" attention.
    heads_share_relative_embedding: boolean to share relative embeddings
    add_relative_to_values: a boolean for whether to add relative component to
      values.
    image_shapes: optional tuple of integer scalars.
      see comments for attention_image_summary()
    block_length: an integer - relevant for "local_mask_right"
    block_width: an integer - relevant for "local_unmasked"
    q_filter_width: An integer specifying how wide you want the query to be.
    kv_filter_width: An integer specifying how wide you want the keys and
      values to be.
    q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID":
      no padding.
    cache: dict containing Tensors which are the results of previous
      attentions, used for fast decoding. Expects the dict to contrain two
      keys ('k' and 'v'), for the initial call the values for these keys
      should be empty Tensors of the appropriate shape.
      'k' [batch_size, 0, key_channels]
      'v' [batch_size, 0, value_channels]
    gap_size: Integer option for dilated attention to indicate spacing
      between memory blocks.
    num_memory_blocks: Integer option to indicate how many memory blocks to
      look at.
    name: an optional string.
    save_weights_to: an optional dictionary to capture attention weights for
      vizualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.
    dropout_broadcast_dims: an optional list of integers less than 4
      specifying in which dimensions to broadcast the dropout decisions.
      saves memory.
    vars_3d: use 3-dimensional variables for input/output transformations
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
      KFAC optimizer. Default is None.
    recurrent_memory: An optional transformer_memory.RecurrentMemory, which
      retains state across chunks. Default is None.
    chunk_number: an optional integer Tensor with shape [batch] used to operate
      the recurrent_memory.
    hard_attention_k: integer, if > 0 triggers hard attention (picking top-k).
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    memory_height: the height of the memory.
    area_key_mode: the mode for computing area keys, which can be "mean",
      "concat", "sum", "sample_concat", and "sample_sum".
    area_value_mode: the mode for computing area values, which can be either
      "mean", or "sum".
    training: indicating if it is in the training mode.
    **kwargs (dict): Parameters for the attention function.

  Caching:
    WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
    the caching assumes that the bias contains future masking.

    The caching works by saving all the previous key and value values so that
    you are able to send just the last query location to this attention
    function. I.e. if the cache dict is provided it assumes the query is of the
    shape [batch_size, 1, hidden_dim] rather than the full memory.

  Returns:
    The result of the attention transformation. The output shape is
        [batch_size, length_q, hidden_dim]
    unless the cache dict is provided in which case only the last memory
    position is calculated and the output shape is [batch_size, 1, hidden_dim]
    Optionally returns an additional loss parameters (ex: load balance loss for
    the experts) returned by the attention_type function.

  Raises:
    ValueError: if the key depth or value depth are not divisible by the
      number of attention heads.
  """
  # Head splitting requires both depths to divide evenly across heads.
  if total_key_depth % num_heads != 0:
    raise ValueError("Key depth (%d) must be divisible by the number of "
                     "attention heads (%d)." % (total_key_depth, num_heads))
  if total_value_depth % num_heads != 0:
    raise ValueError("Value depth (%d) must be divisible by the number of "
                     "attention heads (%d)." % (total_value_depth, num_heads))
  vars_3d_num_heads = num_heads if vars_3d else 0

  # KFAC and recurrent-memory modes are mutually exclusive with several
  # other options; validate the combinations up front.
  if layer_collection is not None:
    if cache is not None:
      raise ValueError("KFAC implementation only supports cache is None.")
    if vars_3d:
      raise ValueError("KFAC implementation does not support 3d vars.")

  if recurrent_memory is not None:
    if memory_antecedent is not None:
      raise ValueError("Recurrent memory requires memory_antecedent is None.")
    if cache is not None:
      raise ValueError("Cache is not supported when using recurrent memory.")
    if vars_3d:
      raise ValueError("3d vars are not supported when using recurrent memory.")
    if layer_collection is not None:
      raise ValueError("KFAC is not supported when using recurrent memory.")
    if chunk_number is None:
      raise ValueError("chunk_number is required when using recurrent memory.")

  with tf.variable_scope(name, default_name="multihead_attention",
                         values=[query_antecedent, memory_antecedent]):

    if recurrent_memory is not None:
      (
          recurrent_memory_transaction,
          query_antecedent,
          memory_antecedent,
          bias,
      ) = recurrent_memory.pre_attention(
          chunk_number,
          query_antecedent,
          memory_antecedent,
          bias,
      )

    # Compute fresh q/k/v projections except in the encoder-decoder cached
    # case, where k/v come from the cache below.
    if cache is None or memory_antecedent is None:
      q, k, v = compute_qkv(query_antecedent, memory_antecedent,
                            total_key_depth, total_value_depth, q_filter_width,
                            kv_filter_width, q_padding, kv_padding,
                            vars_3d_num_heads=vars_3d_num_heads,
                            layer_collection=layer_collection)
    if cache is not None:
      if attention_type not in ["dot_product", "dot_product_relative"]:
        # TODO(petershaw): Support caching when using relative position
        # representations, i.e. "dot_product_relative" attention.
        raise NotImplementedError(
            "Caching is not guaranteed to work with attention types other than"
            " dot_product.")
      if bias is None:
        raise ValueError("Bias required for caching. See function docstring "
                         "for details.")

      if memory_antecedent is not None:
        # Encoder-Decoder Attention Cache
        q = compute_attention_component(query_antecedent, total_key_depth,
                                        q_filter_width, q_padding, "q",
                                        vars_3d_num_heads=vars_3d_num_heads)
        k = cache["k_encdec"]
        v = cache["v_encdec"]
      else:
        # Decoder self-attention: append this step's k/v to the cache.
        k = split_heads(k, num_heads)
        v = split_heads(v, num_heads)
        decode_loop_step = kwargs.get("decode_loop_step")
        if decode_loop_step is None:
          k = cache["k"] = tf.concat([cache["k"], k], axis=2)
          v = cache["v"] = tf.concat([cache["v"], v], axis=2)
        else:
          # Inplace update is required for inference on TPU.
          # Inplace_ops only supports inplace_update on the first dimension.
          # The performance of current implementation is better than updating
          # the tensor by adding the result of matmul(one_hot,
          # update_in_current_step)
          tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3])
          tmp_k = inplace_ops.alias_inplace_update(
              tmp_k, decode_loop_step, tf.squeeze(k, axis=2))
          k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3])
          tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3])
          tmp_v = inplace_ops.alias_inplace_update(
              tmp_v, decode_loop_step, tf.squeeze(v, axis=2))
          v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3])

    q = split_heads(q, num_heads)
    if cache is None:
      k = split_heads(k, num_heads)
      v = split_heads(v, num_heads)

    # Scale queries by 1/sqrt(depth_per_head); with 3d vars the projection
    # already incorporates the scaling.
    key_depth_per_head = total_key_depth // num_heads
    if not vars_3d:
      q *= key_depth_per_head**-0.5

    additional_returned_value = None
    if callable(attention_type):  # Generic way to extend multihead_attention
      x = attention_type(q, k, v, **kwargs)
      if isinstance(x, tuple):
        x, additional_returned_value = x  # Unpack
    elif attention_type == "dot_product":
      if max_area_width > 1 or max_area_height > 1:
        x = area_attention.dot_product_area_attention(
            q, k, v, bias, dropout_rate, image_shapes,
            save_weights_to=save_weights_to,
            dropout_broadcast_dims=dropout_broadcast_dims,
            max_area_width=max_area_width,
            max_area_height=max_area_height,
            memory_height=memory_height,
            area_key_mode=area_key_mode,
            area_value_mode=area_value_mode,
            training=training)
      else:
        x = dot_product_attention(
            q, k, v, bias, dropout_rate, image_shapes,
            save_weights_to=save_weights_to,
            make_image_summary=make_image_summary,
            dropout_broadcast_dims=dropout_broadcast_dims,
            activation_dtype=kwargs.get("activation_dtype"),
            hard_attention_k=hard_attention_k)
    elif attention_type == "dot_product_relative":
      x = dot_product_attention_relative(
          q, k, v, bias, max_relative_position, dropout_rate, image_shapes,
          save_weights_to=save_weights_to,
          make_image_summary=make_image_summary,
          cache=cache is not None,
          allow_memory=recurrent_memory is not None,
          hard_attention_k=hard_attention_k)
    elif attention_type == "dot_product_unmasked_relative_v2":
      x = dot_product_unmasked_self_attention_relative_v2(
          q, k, v, bias, max_relative_position, dropout_rate, image_shapes,
          make_image_summary=make_image_summary,
          dropout_broadcast_dims=dropout_broadcast_dims,
          heads_share_relative_embedding=heads_share_relative_embedding,
          add_relative_to_values=add_relative_to_values)
    elif attention_type == "dot_product_relative_v2":
      x = dot_product_self_attention_relative_v2(
          q, k, v, bias, max_relative_position, dropout_rate, image_shapes,
          make_image_summary=make_image_summary,
          dropout_broadcast_dims=dropout_broadcast_dims,
          heads_share_relative_embedding=heads_share_relative_embedding,
          add_relative_to_values=add_relative_to_values)
    elif attention_type == "local_within_block_mask_right":
      x = masked_within_block_local_attention_1d(
          q, k, v, block_length=block_length)
    elif attention_type == "local_relative_mask_right":
      x = masked_relative_local_attention_1d(
          q, k, v, block_length=block_length,
          make_image_summary=make_image_summary,
          dropout_rate=dropout_rate,
          heads_share_relative_embedding=heads_share_relative_embedding,
          add_relative_to_values=add_relative_to_values,
          name="masked_relative_local_attention_1d")
    elif attention_type == "local_mask_right":
      x = masked_local_attention_1d(
          q, k, v, block_length=block_length,
          make_image_summary=make_image_summary)
    elif attention_type == "local_unmasked":
      x = local_attention_1d(
          q, k, v, block_length=block_length, filter_width=block_width)
    elif attention_type == "masked_dilated_1d":
      x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width,
                                           gap_size, num_memory_blocks)
    else:
      assert attention_type == "unmasked_dilated_1d"
      x = dilated_self_attention_1d(q, k, v, block_length, block_width,
                                    gap_size, num_memory_blocks)
    x = combine_heads(x)

    # Set last dim specifically.
    x.set_shape(x.shape.as_list()[:-1] + [total_value_depth])

    # Final output projection back to output_depth.
    if vars_3d:
      o_var = tf.get_variable(
          "o", [num_heads, total_value_depth // num_heads, output_depth])
      o_var = tf.cast(o_var, x.dtype)
      o_var = tf.reshape(o_var, [total_value_depth, output_depth])
      x = tf.tensordot(x, o_var, axes=1)
    else:
      x = common_layers.dense(
          x, output_depth, use_bias=False, name="output_transform",
          layer_collection=layer_collection)

    if recurrent_memory is not None:
      x = recurrent_memory.post_attention(recurrent_memory_transaction, x)
    if additional_returned_value is not None:
      return x, additional_returned_value
    return x
Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. 
num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. 
The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads.
Below is the the instruction that describes the task: ### Input: Multihead scaled-dot-product attention with input/output transformations. Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. 
num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. 
The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. ### Response: def multihead_attention(query_antecedent, memory_antecedent, bias, total_key_depth, total_value_depth, output_depth, num_heads, dropout_rate, attention_type="dot_product", max_relative_position=None, heads_share_relative_embedding=False, add_relative_to_values=False, image_shapes=None, block_length=128, block_width=128, q_filter_width=1, kv_filter_width=1, q_padding="VALID", kv_padding="VALID", cache=None, gap_size=0, num_memory_blocks=2, name="multihead_attention", save_weights_to=None, make_image_summary=True, dropout_broadcast_dims=None, vars_3d=False, layer_collection=None, recurrent_memory=None, chunk_number=None, hard_attention_k=0, max_area_width=1, max_area_height=1, memory_height=1, area_key_mode="mean", area_value_mode="sum", training=True, **kwargs): """Multihead scaled-dot-product attention with input/output transformations. 
Args: query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: a Tensor with shape [batch, length_m, channels] or None bias: bias Tensor (see attention_bias()) total_key_depth: an integer total_value_depth: an integer output_depth: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number attention_type: a string, either "dot_product", "dot_product_relative", "local_mask_right", "local_unmasked", "masked_dilated_1d", "unmasked_dilated_1d", graph, or any attention function with the signature (query, key, value, **kwargs) max_relative_position: Maximum distance between inputs to generate unique relation embeddings for. Only relevant when using "dot_product_relative" attention. heads_share_relative_embedding: boolean to share relative embeddings add_relative_to_values: a boolean for whether to add relative component to values. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() block_length: an integer - relevant for "local_mask_right" block_width: an integer - relevant for "local_unmasked" q_filter_width: An integer specifying how wide you want the query to be. kv_filter_width: An integer specifying how wide you want the keys and values to be. q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": no padding. cache: dict containing Tensors which are the results of previous attentions, used for fast decoding. Expects the dict to contrain two keys ('k' and 'v'), for the initial call the values for these keys should be empty Tensors of the appropriate shape. 'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels] gap_size: Integer option for dilated attention to indicate spacing between memory blocks. num_memory_blocks: Integer option to indicate how many memory blocks to look at. name: an optional string. 
save_weights_to: an optional dictionary to capture attention weights for vizualization; the weights tensor will be appended there under a string key created from the variable scope (including name). make_image_summary: Whether to make an attention image summary. dropout_broadcast_dims: an optional list of integers less than 4 specifying in which dimensions to broadcast the dropout decisions. saves memory. vars_3d: use 3-dimensional variables for input/output transformations layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC optimizer. Default is None. recurrent_memory: An optional transformer_memory.RecurrentMemory, which retains state across chunks. Default is None. chunk_number: an optional integer Tensor with shape [batch] used to operate the recurrent_memory. hard_attention_k: integer, if > 0 triggers hard attention (picking top-k). max_area_width: the max width allowed for an area. max_area_height: the max height allowed for an area. memory_height: the height of the memory. area_key_mode: the mode for computing area keys, which can be "mean", "concat", "sum", "sample_concat", and "sample_sum". area_value_mode: the mode for computing area values, which can be either "mean", or "sum". training: indicating if it is in the training mode. **kwargs (dict): Parameters for the attention function. Caching: WARNING: For decoder self-attention, i.e. when memory_antecedent == None, the caching assumes that the bias contains future masking. The caching works by saving all the previous key and value values so that you are able to send just the last query location to this attention function. I.e. if the cache dict is provided it assumes the query is of the shape [batch_size, 1, hidden_dim] rather than the full memory. Returns: The result of the attention transformation. 
The output shape is [batch_size, length_q, hidden_dim] unless the cache dict is provided in which case only the last memory position is calculated and the output shape is [batch_size, 1, hidden_dim] Optionally returns an additional loss parameters (ex: load balance loss for the experts) returned by the attention_type function. Raises: ValueError: if the key depth or value depth are not divisible by the number of attention heads. """ if total_key_depth % num_heads != 0: raise ValueError("Key depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_key_depth, num_heads)) if total_value_depth % num_heads != 0: raise ValueError("Value depth (%d) must be divisible by the number of " "attention heads (%d)." % (total_value_depth, num_heads)) vars_3d_num_heads = num_heads if vars_3d else 0 if layer_collection is not None: if cache is not None: raise ValueError("KFAC implementation only supports cache is None.") if vars_3d: raise ValueError("KFAC implementation does not support 3d vars.") if recurrent_memory is not None: if memory_antecedent is not None: raise ValueError("Recurrent memory requires memory_antecedent is None.") if cache is not None: raise ValueError("Cache is not supported when using recurrent memory.") if vars_3d: raise ValueError("3d vars are not supported when using recurrent memory.") if layer_collection is not None: raise ValueError("KFAC is not supported when using recurrent memory.") if chunk_number is None: raise ValueError("chunk_number is required when using recurrent memory.") with tf.variable_scope(name, default_name="multihead_attention", values=[query_antecedent, memory_antecedent]): if recurrent_memory is not None: ( recurrent_memory_transaction, query_antecedent, memory_antecedent, bias, ) = recurrent_memory.pre_attention( chunk_number, query_antecedent, memory_antecedent, bias, ) if cache is None or memory_antecedent is None: q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, total_value_depth, 
q_filter_width, kv_filter_width, q_padding, kv_padding, vars_3d_num_heads=vars_3d_num_heads, layer_collection=layer_collection) if cache is not None: if attention_type not in ["dot_product", "dot_product_relative"]: # TODO(petershaw): Support caching when using relative position # representations, i.e. "dot_product_relative" attention. raise NotImplementedError( "Caching is not guaranteed to work with attention types other than" " dot_product.") if bias is None: raise ValueError("Bias required for caching. See function docstring " "for details.") if memory_antecedent is not None: # Encoder-Decoder Attention Cache q = compute_attention_component(query_antecedent, total_key_depth, q_filter_width, q_padding, "q", vars_3d_num_heads=vars_3d_num_heads) k = cache["k_encdec"] v = cache["v_encdec"] else: k = split_heads(k, num_heads) v = split_heads(v, num_heads) decode_loop_step = kwargs.get("decode_loop_step") if decode_loop_step is None: k = cache["k"] = tf.concat([cache["k"], k], axis=2) v = cache["v"] = tf.concat([cache["v"], v], axis=2) else: # Inplace update is required for inference on TPU. # Inplace_ops only supports inplace_update on the first dimension. 
# The performance of current implementation is better than updating # the tensor by adding the result of matmul(one_hot, # update_in_current_step) tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) tmp_k = inplace_ops.alias_inplace_update( tmp_k, decode_loop_step, tf.squeeze(k, axis=2)) k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) tmp_v = inplace_ops.alias_inplace_update( tmp_v, decode_loop_step, tf.squeeze(v, axis=2)) v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) q = split_heads(q, num_heads) if cache is None: k = split_heads(k, num_heads) v = split_heads(v, num_heads) key_depth_per_head = total_key_depth // num_heads if not vars_3d: q *= key_depth_per_head**-0.5 additional_returned_value = None if callable(attention_type): # Generic way to extend multihead_attention x = attention_type(q, k, v, **kwargs) if isinstance(x, tuple): x, additional_returned_value = x # Unpack elif attention_type == "dot_product": if max_area_width > 1 or max_area_height > 1: x = area_attention.dot_product_area_attention( q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, dropout_broadcast_dims=dropout_broadcast_dims, max_area_width=max_area_width, max_area_height=max_area_height, memory_height=memory_height, area_key_mode=area_key_mode, area_value_mode=area_value_mode, training=training) else: x = dot_product_attention(q, k, v, bias, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, activation_dtype=kwargs.get( "activation_dtype"), hard_attention_k=hard_attention_k) elif attention_type == "dot_product_relative": x = dot_product_attention_relative( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, save_weights_to=save_weights_to, make_image_summary=make_image_summary, cache=cache is not None, allow_memory=recurrent_memory is not None, hard_attention_k=hard_attention_k) elif 
attention_type == "dot_product_unmasked_relative_v2": x = dot_product_unmasked_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) elif attention_type == "dot_product_relative_v2": x = dot_product_self_attention_relative_v2( q, k, v, bias, max_relative_position, dropout_rate, image_shapes, make_image_summary=make_image_summary, dropout_broadcast_dims=dropout_broadcast_dims, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values) elif attention_type == "local_within_block_mask_right": x = masked_within_block_local_attention_1d( q, k, v, block_length=block_length) elif attention_type == "local_relative_mask_right": x = masked_relative_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary, dropout_rate=dropout_rate, heads_share_relative_embedding=heads_share_relative_embedding, add_relative_to_values=add_relative_to_values, name="masked_relative_local_attention_1d") elif attention_type == "local_mask_right": x = masked_local_attention_1d( q, k, v, block_length=block_length, make_image_summary=make_image_summary) elif attention_type == "local_unmasked": x = local_attention_1d( q, k, v, block_length=block_length, filter_width=block_width) elif attention_type == "masked_dilated_1d": x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) else: assert attention_type == "unmasked_dilated_1d" x = dilated_self_attention_1d(q, k, v, block_length, block_width, gap_size, num_memory_blocks) x = combine_heads(x) # Set last dim specifically. 
x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) if vars_3d: o_var = tf.get_variable( "o", [num_heads, total_value_depth // num_heads, output_depth]) o_var = tf.cast(o_var, x.dtype) o_var = tf.reshape(o_var, [total_value_depth, output_depth]) x = tf.tensordot(x, o_var, axes=1) else: x = common_layers.dense( x, output_depth, use_bias=False, name="output_transform", layer_collection=layer_collection) if recurrent_memory is not None: x = recurrent_memory.post_attention(recurrent_memory_transaction, x) if additional_returned_value is not None: return x, additional_returned_value return x
def get_command_handlers(): ''' Create a map of command names and handlers ''' return { 'activate': activate, 'config': hconfig, 'deactivate': deactivate, 'help': cli_help, 'kill': kill, 'restart': restart, 'submit': submit, 'update': update, 'version': version }
Create a map of command names and handlers
Below is the the instruction that describes the task: ### Input: Create a map of command names and handlers ### Response: def get_command_handlers(): ''' Create a map of command names and handlers ''' return { 'activate': activate, 'config': hconfig, 'deactivate': deactivate, 'help': cli_help, 'kill': kill, 'restart': restart, 'submit': submit, 'update': update, 'version': version }
def configure(): """ Update config """ jira_url = utils.get_input(raw_input, "Jira url") username = utils.get_input(raw_input, "username") password = utils.get_input(getpass.getpass, "password") error_reporting = True \ if 'n' not in raw_input("Would you like to automatically report errors to help improve the software? [y]/N: ").lower() \ else False configuration._save_config(jira_url, username, password, error_reporting) try: connection.jira_connection(configuration.load_config()) except jira_exceptions.JIRAError as e: configuration._delete_config() logging.error("You have an error in your jira connection/configuration: {error}. Please fix the configuration before attempting to use jtime.\n We suggest trying your username without using the email address.".format(error=e))
Update config
Below is the the instruction that describes the task: ### Input: Update config ### Response: def configure(): """ Update config """ jira_url = utils.get_input(raw_input, "Jira url") username = utils.get_input(raw_input, "username") password = utils.get_input(getpass.getpass, "password") error_reporting = True \ if 'n' not in raw_input("Would you like to automatically report errors to help improve the software? [y]/N: ").lower() \ else False configuration._save_config(jira_url, username, password, error_reporting) try: connection.jira_connection(configuration.load_config()) except jira_exceptions.JIRAError as e: configuration._delete_config() logging.error("You have an error in your jira connection/configuration: {error}. Please fix the configuration before attempting to use jtime.\n We suggest trying your username without using the email address.".format(error=e))
def subscribe(self, request, *args, **kwargs): """ Performs the subscribe action. """ self.object = self.get_object() self.object.subscribers.add(request.user) messages.success(self.request, self.success_message) return HttpResponseRedirect(self.get_success_url())
Performs the subscribe action.
Below is the the instruction that describes the task: ### Input: Performs the subscribe action. ### Response: def subscribe(self, request, *args, **kwargs): """ Performs the subscribe action. """ self.object = self.get_object() self.object.subscribers.add(request.user) messages.success(self.request, self.success_message) return HttpResponseRedirect(self.get_success_url())
def _compute_average_correct(input_, labels, per_example_weights, topk=1): """Returns the numerator and denominator of classifier accuracy.""" return _compute_sparse_average_correct( input_, tf.reshape(tf.argmax(labels, 1), [-1, 1]), per_example_weights, topk=topk)
Returns the numerator and denominator of classifier accuracy.
Below is the the instruction that describes the task: ### Input: Returns the numerator and denominator of classifier accuracy. ### Response: def _compute_average_correct(input_, labels, per_example_weights, topk=1): """Returns the numerator and denominator of classifier accuracy.""" return _compute_sparse_average_correct( input_, tf.reshape(tf.argmax(labels, 1), [-1, 1]), per_example_weights, topk=topk)
def _GetRunFlowFlags(args=None): """Retrieves command line flags based on gflags module.""" # There's one rare situation where gsutil will not have argparse # available, but doesn't need anything depending on argparse anyway, # since they're bringing their own credentials. So we just allow this # to fail with an ImportError in those cases. # # TODO(craigcitro): Move this import back to the top when we drop # python 2.6 support (eg when gsutil does). import argparse parser = argparse.ArgumentParser(parents=[tools.argparser]) # Get command line argparse flags. flags, _ = parser.parse_known_args(args=args) # Allow `gflags` and `argparse` to be used side-by-side. if hasattr(FLAGS, 'auth_host_name'): flags.auth_host_name = FLAGS.auth_host_name if hasattr(FLAGS, 'auth_host_port'): flags.auth_host_port = FLAGS.auth_host_port if hasattr(FLAGS, 'auth_local_webserver'): flags.noauth_local_webserver = (not FLAGS.auth_local_webserver) return flags
Retrieves command line flags based on gflags module.
Below is the the instruction that describes the task: ### Input: Retrieves command line flags based on gflags module. ### Response: def _GetRunFlowFlags(args=None): """Retrieves command line flags based on gflags module.""" # There's one rare situation where gsutil will not have argparse # available, but doesn't need anything depending on argparse anyway, # since they're bringing their own credentials. So we just allow this # to fail with an ImportError in those cases. # # TODO(craigcitro): Move this import back to the top when we drop # python 2.6 support (eg when gsutil does). import argparse parser = argparse.ArgumentParser(parents=[tools.argparser]) # Get command line argparse flags. flags, _ = parser.parse_known_args(args=args) # Allow `gflags` and `argparse` to be used side-by-side. if hasattr(FLAGS, 'auth_host_name'): flags.auth_host_name = FLAGS.auth_host_name if hasattr(FLAGS, 'auth_host_port'): flags.auth_host_port = FLAGS.auth_host_port if hasattr(FLAGS, 'auth_local_webserver'): flags.noauth_local_webserver = (not FLAGS.auth_local_webserver) return flags
def resize_image(image, tuple_wh, preserve_aspect=True): """Resizes an instance of a PIL Image. In order to prevent un-intended side effects, this function always returns a copy of the image, as the resize function from PIL returns a copy but the thumbnail function does not. Args: image: An instance of a PIL Image. tuple_wh: A tuple containing the (width, height) for resizing. preserve_aspect: A boolean that determines whether or not the resizing should preserve the image's aspect ratio. Returns: A resized copy of the provided PIL image. """ if preserve_aspect: img_cpy = image.copy() img_cpy.thumbnail(tuple_wh) return img_cpy else: return image.resize(tuple_wh)
Resizes an instance of a PIL Image. In order to prevent un-intended side effects, this function always returns a copy of the image, as the resize function from PIL returns a copy but the thumbnail function does not. Args: image: An instance of a PIL Image. tuple_wh: A tuple containing the (width, height) for resizing. preserve_aspect: A boolean that determines whether or not the resizing should preserve the image's aspect ratio. Returns: A resized copy of the provided PIL image.
Below is the the instruction that describes the task: ### Input: Resizes an instance of a PIL Image. In order to prevent un-intended side effects, this function always returns a copy of the image, as the resize function from PIL returns a copy but the thumbnail function does not. Args: image: An instance of a PIL Image. tuple_wh: A tuple containing the (width, height) for resizing. preserve_aspect: A boolean that determines whether or not the resizing should preserve the image's aspect ratio. Returns: A resized copy of the provided PIL image. ### Response: def resize_image(image, tuple_wh, preserve_aspect=True): """Resizes an instance of a PIL Image. In order to prevent un-intended side effects, this function always returns a copy of the image, as the resize function from PIL returns a copy but the thumbnail function does not. Args: image: An instance of a PIL Image. tuple_wh: A tuple containing the (width, height) for resizing. preserve_aspect: A boolean that determines whether or not the resizing should preserve the image's aspect ratio. Returns: A resized copy of the provided PIL image. """ if preserve_aspect: img_cpy = image.copy() img_cpy.thumbnail(tuple_wh) return img_cpy else: return image.resize(tuple_wh)
def slicenet_params1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.batch_size = 1024 hparams.hidden_size = 768 hparams.dropout = 0.5 hparams.symbol_dropout = 0.2 hparams.label_smoothing = 0.1 hparams.clip_grad_norm = 2.0 hparams.num_hidden_layers = 4 hparams.kernel_height = 3 hparams.kernel_width = 1 hparams.norm_type = "layer" hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 hparams.weight_decay = 3.0 hparams.num_sampled_classes = 0 hparams.sampling_method = "argmax" hparams.optimizer_adam_epsilon = 1e-6 hparams.optimizer_adam_beta1 = 0.85 hparams.optimizer_adam_beta2 = 0.997 hparams.add_hparam("large_kernel_size", 15) # New ones are added like this. hparams.add_hparam("separability", -2) # A dilation scheme, one of _DILATION_SCHEMES. hparams.add_hparam("dilation_scheme", "1.1.1.1") # A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size. hparams.add_hparam("kernel_scheme", "3.7.15.31") hparams.add_hparam("audio_compression", 8) # attention-related flags hparams.add_hparam("attention_type", "simple") hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("sim_loss_mult", 0.0) # Try 10.0 for experiments. hparams.add_hparam("attention_dropout", 0.2) hparams.shared_embedding_and_softmax_weights = True return hparams
Set of hyperparameters.
Below is the the instruction that describes the task: ### Input: Set of hyperparameters. ### Response: def slicenet_params1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() hparams.batch_size = 1024 hparams.hidden_size = 768 hparams.dropout = 0.5 hparams.symbol_dropout = 0.2 hparams.label_smoothing = 0.1 hparams.clip_grad_norm = 2.0 hparams.num_hidden_layers = 4 hparams.kernel_height = 3 hparams.kernel_width = 1 hparams.norm_type = "layer" hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 hparams.weight_decay = 3.0 hparams.num_sampled_classes = 0 hparams.sampling_method = "argmax" hparams.optimizer_adam_epsilon = 1e-6 hparams.optimizer_adam_beta1 = 0.85 hparams.optimizer_adam_beta2 = 0.997 hparams.add_hparam("large_kernel_size", 15) # New ones are added like this. hparams.add_hparam("separability", -2) # A dilation scheme, one of _DILATION_SCHEMES. hparams.add_hparam("dilation_scheme", "1.1.1.1") # A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size. hparams.add_hparam("kernel_scheme", "3.7.15.31") hparams.add_hparam("audio_compression", 8) # attention-related flags hparams.add_hparam("attention_type", "simple") hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("sim_loss_mult", 0.0) # Try 10.0 for experiments. hparams.add_hparam("attention_dropout", 0.2) hparams.shared_embedding_and_softmax_weights = True return hparams
def translation_save_translated_fields(instance, **kwargs): """ Save all the translations of instance in post_save signal handler. """ if not hasattr(instance, '_translation_cache'): return for l_id, translation in instance._translation_cache.iteritems(): # set the translation ID just in case the translation was # created while instance was not stored in the DB yet # note: we're using _get_pk_val here even though it is # private, since that's the most reliable way to get the value # on older Django (pk property did not exist yet) translation.master_id = instance._get_pk_val() translation.save()
Save all the translations of instance in post_save signal handler.
Below is the the instruction that describes the task: ### Input: Save all the translations of instance in post_save signal handler. ### Response: def translation_save_translated_fields(instance, **kwargs): """ Save all the translations of instance in post_save signal handler. """ if not hasattr(instance, '_translation_cache'): return for l_id, translation in instance._translation_cache.iteritems(): # set the translation ID just in case the translation was # created while instance was not stored in the DB yet # note: we're using _get_pk_val here even though it is # private, since that's the most reliable way to get the value # on older Django (pk property did not exist yet) translation.master_id = instance._get_pk_val() translation.save()
def unversioned(cls, coord): """The coordinate without the version. :param M2Coordinate coord: an M2Coordinate or JarDependency. :return: the coordinate without the version. :rtype: M2Coordinate """ coord = cls.create(coord) if coord.rev is None: return coord return M2Coordinate(org=coord.org, name=coord.name, classifier=coord.classifier, ext=coord.ext)
The coordinate without the version. :param M2Coordinate coord: an M2Coordinate or JarDependency. :return: the coordinate without the version. :rtype: M2Coordinate
Below is the the instruction that describes the task: ### Input: The coordinate without the version. :param M2Coordinate coord: an M2Coordinate or JarDependency. :return: the coordinate without the version. :rtype: M2Coordinate ### Response: def unversioned(cls, coord): """The coordinate without the version. :param M2Coordinate coord: an M2Coordinate or JarDependency. :return: the coordinate without the version. :rtype: M2Coordinate """ coord = cls.create(coord) if coord.rev is None: return coord return M2Coordinate(org=coord.org, name=coord.name, classifier=coord.classifier, ext=coord.ext)
def _GetElementDataTypeDefinition(self, data_type_definition): """Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') element_data_type_definition = getattr( data_type_definition, 'element_data_type_definition', None) if not element_data_type_definition: raise errors.FormatError( 'Invalid data type definition missing element') return element_data_type_definition
Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition.
Below is the the instruction that describes the task: ### Input: Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition. ### Response: def _GetElementDataTypeDefinition(self, data_type_definition): """Retrieves the element data type definition. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: DataTypeDefinition: element data type definition. Raises: FormatError: if the element data type cannot be determined from the data type definition. """ if not data_type_definition: raise errors.FormatError('Missing data type definition') element_data_type_definition = getattr( data_type_definition, 'element_data_type_definition', None) if not element_data_type_definition: raise errors.FormatError( 'Invalid data type definition missing element') return element_data_type_definition
def get_multi_match_mapping(self) -> Dict[Type, List[Type]]: """ Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches. """ if self._multi_match_mapping is None: self._multi_match_mapping = {} basic_types = self.get_basic_types() for basic_type in basic_types: if isinstance(basic_type, types.MultiMatchNamedBasicType): matched_types: List[str] = [] # We need to check if each type in the `types_to_match` field for the given # MultiMatchNamedBasic type is itself in the set of basic types allowed in this # world, and add it to the mapping only if it is. Some basic types that the # multi match type can match with may be diallowed in the world due to the # instance-specific context. for type_ in basic_type.types_to_match: if type_ in basic_types: matched_types.append(type_) self._multi_match_mapping[basic_type] = matched_types return self._multi_match_mapping
Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches.
Below is the the instruction that describes the task: ### Input: Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches. ### Response: def get_multi_match_mapping(self) -> Dict[Type, List[Type]]: """ Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches. """ if self._multi_match_mapping is None: self._multi_match_mapping = {} basic_types = self.get_basic_types() for basic_type in basic_types: if isinstance(basic_type, types.MultiMatchNamedBasicType): matched_types: List[str] = [] # We need to check if each type in the `types_to_match` field for the given # MultiMatchNamedBasic type is itself in the set of basic types allowed in this # world, and add it to the mapping only if it is. Some basic types that the # multi match type can match with may be diallowed in the world due to the # instance-specific context. for type_ in basic_type.types_to_match: if type_ in basic_types: matched_types.append(type_) self._multi_match_mapping[basic_type] = matched_types return self._multi_match_mapping
def link_cloud(self, username=None, password=None, device_id=None): """Create and store a token for interacting with the IOTile Cloud API. You will need to call link_cloud once for each virtualenv that you create and want to use with any api calls that touch iotile cloud. Note that this method is called on a ConfigManager instance If you do not pass your username or password it will be prompted from you securely on stdin. If you are logging in for a user, the token will expire periodically and you will have to relogin. If you pass a device_id, you can obtain a limited token for that device that will never expire, assuming you have access to that device. Args: username (string): Your iotile.cloud username. This is prompted from stdin if not provided. password (string): Your iotile.cloud password. This is prompted from stdin if not provided. device_id (int): Optional device id to obtain permanent credentials for a device. """ reg = ComponentRegistry() domain = self.get('cloud:server') if username is None: prompt_str = "Please enter your IOTile.cloud email: " username = input(prompt_str) if password is None: prompt_str = "Please enter your IOTile.cloud password: " password = getpass.getpass(prompt_str) cloud = Api(domain=domain) ok_resp = cloud.login(email=username, password=password) if not ok_resp: raise ArgumentError("Could not login to iotile.cloud as user %s" % username) reg.set_config('arch:cloud_user', cloud.username) reg.set_config('arch:cloud_token', cloud.token) reg.set_config('arch:cloud_token_type', cloud.token_type) if device_id is not None: cloud = IOTileCloud() cloud.impersonate_device(device_id)
Create and store a token for interacting with the IOTile Cloud API. You will need to call link_cloud once for each virtualenv that you create and want to use with any api calls that touch iotile cloud. Note that this method is called on a ConfigManager instance If you do not pass your username or password it will be prompted from you securely on stdin. If you are logging in for a user, the token will expire periodically and you will have to relogin. If you pass a device_id, you can obtain a limited token for that device that will never expire, assuming you have access to that device. Args: username (string): Your iotile.cloud username. This is prompted from stdin if not provided. password (string): Your iotile.cloud password. This is prompted from stdin if not provided. device_id (int): Optional device id to obtain permanent credentials for a device.
Below is the the instruction that describes the task: ### Input: Create and store a token for interacting with the IOTile Cloud API. You will need to call link_cloud once for each virtualenv that you create and want to use with any api calls that touch iotile cloud. Note that this method is called on a ConfigManager instance If you do not pass your username or password it will be prompted from you securely on stdin. If you are logging in for a user, the token will expire periodically and you will have to relogin. If you pass a device_id, you can obtain a limited token for that device that will never expire, assuming you have access to that device. Args: username (string): Your iotile.cloud username. This is prompted from stdin if not provided. password (string): Your iotile.cloud password. This is prompted from stdin if not provided. device_id (int): Optional device id to obtain permanent credentials for a device. ### Response: def link_cloud(self, username=None, password=None, device_id=None): """Create and store a token for interacting with the IOTile Cloud API. You will need to call link_cloud once for each virtualenv that you create and want to use with any api calls that touch iotile cloud. Note that this method is called on a ConfigManager instance If you do not pass your username or password it will be prompted from you securely on stdin. If you are logging in for a user, the token will expire periodically and you will have to relogin. If you pass a device_id, you can obtain a limited token for that device that will never expire, assuming you have access to that device. Args: username (string): Your iotile.cloud username. This is prompted from stdin if not provided. password (string): Your iotile.cloud password. This is prompted from stdin if not provided. device_id (int): Optional device id to obtain permanent credentials for a device. 
""" reg = ComponentRegistry() domain = self.get('cloud:server') if username is None: prompt_str = "Please enter your IOTile.cloud email: " username = input(prompt_str) if password is None: prompt_str = "Please enter your IOTile.cloud password: " password = getpass.getpass(prompt_str) cloud = Api(domain=domain) ok_resp = cloud.login(email=username, password=password) if not ok_resp: raise ArgumentError("Could not login to iotile.cloud as user %s" % username) reg.set_config('arch:cloud_user', cloud.username) reg.set_config('arch:cloud_token', cloud.token) reg.set_config('arch:cloud_token_type', cloud.token_type) if device_id is not None: cloud = IOTileCloud() cloud.impersonate_device(device_id)
def p_operation_invocation_1(self, p): '''instance_invocation : structure DOT identifier LPAREN parameter_list RPAREN''' p[0] = InstanceInvocationNode(handle=p[1], action_name=p[3], parameter_list=p[5])
instance_invocation : structure DOT identifier LPAREN parameter_list RPAREN
Below is the the instruction that describes the task: ### Input: instance_invocation : structure DOT identifier LPAREN parameter_list RPAREN ### Response: def p_operation_invocation_1(self, p): '''instance_invocation : structure DOT identifier LPAREN parameter_list RPAREN''' p[0] = InstanceInvocationNode(handle=p[1], action_name=p[3], parameter_list=p[5])
def collection(self, path): """To return all items generated by get collection.""" data = [] for item in self.get_collection(path): data.append(item) return data
To return all items generated by get collection.
Below is the the instruction that describes the task: ### Input: To return all items generated by get collection. ### Response: def collection(self, path): """To return all items generated by get collection.""" data = [] for item in self.get_collection(path): data.append(item) return data
def populate_connections(self, metamodel): ''' Populate links in a *metamodel* with connections between them. ''' storage = dict() for ass in metamodel.associations: source_class = ass.source_link.to_metaclass target_class = ass.target_link.to_metaclass if target_class not in storage: storage[target_class] = dict() link_key = frozenset(ass.source_link.key_map.values()) if link_key not in storage[target_class]: storage[target_class][link_key] = dict() for other_inst in target_class.storage: inst_key = ass.source_link.compute_index_key(other_inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: storage[target_class][link_key][inst_key] = xtuml.OrderedSet() storage[target_class][link_key][inst_key].add(other_inst) for inst in source_class.storage: inst_key = ass.source_link.compute_lookup_key(inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: continue for other_inst in storage[target_class][link_key][inst_key]: ass.source_link.connect(other_inst, inst, check=False) ass.target_link.connect(inst, other_inst, check=False) for inst in metamodel.instances: metaclass = xtuml.get_metaclass(inst) for attr in metaclass.referential_attributes: if attr in inst.__dict__: delattr(inst, attr)
Populate links in a *metamodel* with connections between them.
Below is the the instruction that describes the task: ### Input: Populate links in a *metamodel* with connections between them. ### Response: def populate_connections(self, metamodel): ''' Populate links in a *metamodel* with connections between them. ''' storage = dict() for ass in metamodel.associations: source_class = ass.source_link.to_metaclass target_class = ass.target_link.to_metaclass if target_class not in storage: storage[target_class] = dict() link_key = frozenset(ass.source_link.key_map.values()) if link_key not in storage[target_class]: storage[target_class][link_key] = dict() for other_inst in target_class.storage: inst_key = ass.source_link.compute_index_key(other_inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: storage[target_class][link_key][inst_key] = xtuml.OrderedSet() storage[target_class][link_key][inst_key].add(other_inst) for inst in source_class.storage: inst_key = ass.source_link.compute_lookup_key(inst) if inst_key is None: continue if inst_key not in storage[target_class][link_key]: continue for other_inst in storage[target_class][link_key][inst_key]: ass.source_link.connect(other_inst, inst, check=False) ass.target_link.connect(inst, other_inst, check=False) for inst in metamodel.instances: metaclass = xtuml.get_metaclass(inst) for attr in metaclass.referential_attributes: if attr in inst.__dict__: delattr(inst, attr)
def _set_show_mpls_policy(self, v, load=False): """ Setter method for show_mpls_policy, mapped from YANG variable /brocade_mpls_rpc/show_mpls_policy (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_policy() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_mpls_policy.show_mpls_policy, is_leaf=True, yang_name="show-mpls-policy", rest_name="show-mpls-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMpls'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_mpls_policy must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_mpls_policy.show_mpls_policy, is_leaf=True, yang_name="show-mpls-policy", rest_name="show-mpls-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMpls'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__show_mpls_policy = t if hasattr(self, '_set'): self._set()
Setter method for show_mpls_policy, mapped from YANG variable /brocade_mpls_rpc/show_mpls_policy (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_policy() directly.
Below is the the instruction that describes the task: ### Input: Setter method for show_mpls_policy, mapped from YANG variable /brocade_mpls_rpc/show_mpls_policy (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_policy() directly. ### Response: def _set_show_mpls_policy(self, v, load=False): """ Setter method for show_mpls_policy, mapped from YANG variable /brocade_mpls_rpc/show_mpls_policy (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_mpls_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_mpls_policy() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=show_mpls_policy.show_mpls_policy, is_leaf=True, yang_name="show-mpls-policy", rest_name="show-mpls-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMpls'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """show_mpls_policy must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=show_mpls_policy.show_mpls_policy, is_leaf=True, yang_name="show-mpls-policy", rest_name="show-mpls-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMpls'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__show_mpls_policy = t if hasattr(self, '_set'): self._set()
def base_url(self): """A base_url that will be used to construct the final URL we're going to query against. :returns: A URL of the form: ``proto://host:port``. :rtype: :obj:`string` """ return '{proto}://{host}:{port}{url_path}'.format( proto=self.protocol, host=self.host, port=self.port, url_path=self.url_path, )
A base_url that will be used to construct the final URL we're going to query against. :returns: A URL of the form: ``proto://host:port``. :rtype: :obj:`string`
Below is the the instruction that describes the task: ### Input: A base_url that will be used to construct the final URL we're going to query against. :returns: A URL of the form: ``proto://host:port``. :rtype: :obj:`string` ### Response: def base_url(self): """A base_url that will be used to construct the final URL we're going to query against. :returns: A URL of the form: ``proto://host:port``. :rtype: :obj:`string` """ return '{proto}://{host}:{port}{url_path}'.format( proto=self.protocol, host=self.host, port=self.port, url_path=self.url_path, )
def write_conll(self, fname): """ Serializes the dataset in CONLL format to fname """ if 'label' not in self.fields: raise InvalidFieldsException("dataset is not in CONLL format: missing label field") def instance_to_conll(inst): tab = [v for k, v in inst.items() if k != 'label'] return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)])) with open(fname, 'wb') as f: f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label']))) for i, d in enumerate(self): f.write('\n{}'.format(instance_to_conll(d))) if i != len(self) - 1: f.write('\n')
Serializes the dataset in CONLL format to fname
Below is the the instruction that describes the task: ### Input: Serializes the dataset in CONLL format to fname ### Response: def write_conll(self, fname): """ Serializes the dataset in CONLL format to fname """ if 'label' not in self.fields: raise InvalidFieldsException("dataset is not in CONLL format: missing label field") def instance_to_conll(inst): tab = [v for k, v in inst.items() if k != 'label'] return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)])) with open(fname, 'wb') as f: f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label']))) for i, d in enumerate(self): f.write('\n{}'.format(instance_to_conll(d))) if i != len(self) - 1: f.write('\n')
def get_breadcrumbs(self): """ Breadcrumb format: (('name', 'url'), ...) or None if not used. """ if not self.breadcrumbs: return None else: allowed_breadcrumbs = [] for breadcrumb in self.breadcrumbs: # check permission based on named_url if breadcrumb[1] is not None and not view_from_url( breadcrumb[1] ).has_permission(self.request.user): continue obj = self if not hasattr(self, "object") else self.object url = ( None if not breadcrumb[1] else reverse_url(breadcrumb[1], obj) ) allowed_breadcrumbs.append({"name": breadcrumb[0], "url": url}) return allowed_breadcrumbs
Breadcrumb format: (('name', 'url'), ...) or None if not used.
Below is the the instruction that describes the task: ### Input: Breadcrumb format: (('name', 'url'), ...) or None if not used. ### Response: def get_breadcrumbs(self): """ Breadcrumb format: (('name', 'url'), ...) or None if not used. """ if not self.breadcrumbs: return None else: allowed_breadcrumbs = [] for breadcrumb in self.breadcrumbs: # check permission based on named_url if breadcrumb[1] is not None and not view_from_url( breadcrumb[1] ).has_permission(self.request.user): continue obj = self if not hasattr(self, "object") else self.object url = ( None if not breadcrumb[1] else reverse_url(breadcrumb[1], obj) ) allowed_breadcrumbs.append({"name": breadcrumb[0], "url": url}) return allowed_breadcrumbs
def multithreader(args, paths): """Execute multiple processes at once.""" def shellprocess(path): """Return a ready-to-use subprocess.""" import subprocess return subprocess.Popen(args + [path], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) processes = [shellprocess(path) for path in paths] for process in processes: process.wait()
Execute multiple processes at once.
Below is the the instruction that describes the task: ### Input: Execute multiple processes at once. ### Response: def multithreader(args, paths): """Execute multiple processes at once.""" def shellprocess(path): """Return a ready-to-use subprocess.""" import subprocess return subprocess.Popen(args + [path], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) processes = [shellprocess(path) for path in paths] for process in processes: process.wait()
def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. """ ( args, kwargs, missing, extra, extra_positional, arg_spec, vararg_var, kwarg_var, ) = _parse_signature(func)(args, kwargs) values = {} for (name, _has_default, _default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError("too many positional arguments") if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError( "got multiple values for keyword argument " + repr(next(iter(multikw))) ) values[kwarg_var] = extra elif extra: raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) return values
Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments.
Below is the the instruction that describes the task: ### Input: Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. ### Response: def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. """ ( args, kwargs, missing, extra, extra_positional, arg_spec, vararg_var, kwarg_var, ) = _parse_signature(func)(args, kwargs) values = {} for (name, _has_default, _default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError("too many positional arguments") if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError( "got multiple values for keyword argument " + repr(next(iter(multikw))) ) values[kwarg_var] = extra elif extra: raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) return values
def get_tags(self, tagtype): ''' Get all tags of a type ''' return [t for t in self.__tags if t.tagtype == tagtype]
Get all tags of a type
Below is the the instruction that describes the task: ### Input: Get all tags of a type ### Response: def get_tags(self, tagtype): ''' Get all tags of a type ''' return [t for t in self.__tags if t.tagtype == tagtype]
def add_arguments(self, parser): """Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None`` """ group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-l', '--list', nargs='?', type=str.lower, default='_', choices=['usb', 'ip'], help='list all the connected emulators') group.add_argument('-s', '--supported', nargs=1, help='query whether a device is supported') group.add_argument('-t', '--test', action='store_true', help='perform a self-test') return None
Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None``
Below is the the instruction that describes the task: ### Input: Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None`` ### Response: def add_arguments(self, parser): """Adds the arguments for the emulator command. Args: self (EmulatorCommand): the ``EmulatorCommand`` instance parser (argparse.ArgumentParser): parser to add the commands to Returns: ``None`` """ group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-l', '--list', nargs='?', type=str.lower, default='_', choices=['usb', 'ip'], help='list all the connected emulators') group.add_argument('-s', '--supported', nargs=1, help='query whether a device is supported') group.add_argument('-t', '--test', action='store_true', help='perform a self-test') return None
def _get_current_minute(self): """ Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. """ dt = self.datetime if self._adjust_minutes: dt = \ self.data_portal.trading_calendar.previous_minute(dt) if self._daily_mode: # if we're in daily mode, take the given dt (which is the last # minute of the session) and get the session label for it. dt = self.data_portal.trading_calendar.minute_to_session_label(dt) return dt
Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute.
Below is the the instruction that describes the task: ### Input: Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. ### Response: def _get_current_minute(self): """ Internal utility method to get the current simulation time. Possible answers are: - whatever the algorithm's get_datetime() method returns (this is what `self.simulation_dt_func()` points to) - sometimes we're knowingly not in a market minute, like if we're in before_trading_start. In that case, `self._adjust_minutes` is True, and we get the previous market minute. - if we're in daily mode, get the session label for this minute. """ dt = self.datetime if self._adjust_minutes: dt = \ self.data_portal.trading_calendar.previous_minute(dt) if self._daily_mode: # if we're in daily mode, take the given dt (which is the last # minute of the session) and get the session label for it. dt = self.data_portal.trading_calendar.minute_to_session_label(dt) return dt
def read_stems( filename, out_type=np.float_, stem_id=None, start=0, duration=None, info=None ): """Read STEMS format into numpy Tensor Parameters ---------- filename : str Filename of STEMS format. Typically `filename.stem.mp4`. out_type : type Output type. Defaults to 32bit float aka `np.float32`. stem_id : int Stem ID (Stream ID) to read. Defaults to `None`, which reads all available stems. start : float Start position (seek) in seconds, defaults to 0. duration : float Read `duration` seconds. End position then is `start + duration`. Defaults to `None`: read till the end. info : object provide info object, useful if read_stems is called frequently on file with same configuration (#streams, #channels, samplerate). Returns ------- stems : array_like The tensor of Matrix of stems. The data shape is formatted as :code:`stems x channels x samples`. Notes ----- Input is expected to be in 16bit/44.1 kHz """ if info is None: FFinfo = Info(filename) else: FFinfo = info if stem_id is not None: substreams = stem_id else: substreams = FFinfo.audio_stream_idx() if not isinstance(substreams, list): substreams = [substreams] stems = [] tmps = [ tmp.NamedTemporaryFile(delete=False, suffix='.wav') for t in substreams ] for tmp_id, stem in enumerate(substreams): rate = FFinfo.rate(stem) channels = FFinfo.channels(stem) cmd = [ 'ffmpeg', '-y', '-vn', '-i', filename, '-map', '0:' + str(stem), '-acodec', 'pcm_s16le', '-ar', str(rate), '-ac', str(channels), '-loglevel', 'error', tmps[tmp_id].name ] if start: cmd.insert(3, '-ss') cmd.insert(4, str(start)) if duration is not None: cmd.insert(-1, '-t') cmd.insert(-1, str(duration)) sp.call(cmd) # read wav files audio, rate = sf.read(tmps[tmp_id].name) tmps[tmp_id].close() os.remove(tmps[tmp_id].name) stems.append(audio) # check if all stems have the same duration stem_durations = np.array([t.shape[0] for t in stems]) if not (stem_durations == stem_durations[0]).all(): warnings.warn("Warning.......Stems differ in length and were 
shortend") min_length = np.min(stem_durations) stems = [t[:min_length, :] for t in stems] stems = np.array(stems) stems = np.squeeze(stems).astype(out_type) return stems, rate
Read STEMS format into numpy Tensor Parameters ---------- filename : str Filename of STEMS format. Typically `filename.stem.mp4`. out_type : type Output type. Defaults to 32bit float aka `np.float32`. stem_id : int Stem ID (Stream ID) to read. Defaults to `None`, which reads all available stems. start : float Start position (seek) in seconds, defaults to 0. duration : float Read `duration` seconds. End position then is `start + duration`. Defaults to `None`: read till the end. info : object provide info object, useful if read_stems is called frequently on file with same configuration (#streams, #channels, samplerate). Returns ------- stems : array_like The tensor of Matrix of stems. The data shape is formatted as :code:`stems x channels x samples`. Notes ----- Input is expected to be in 16bit/44.1 kHz
Below is the the instruction that describes the task: ### Input: Read STEMS format into numpy Tensor Parameters ---------- filename : str Filename of STEMS format. Typically `filename.stem.mp4`. out_type : type Output type. Defaults to 32bit float aka `np.float32`. stem_id : int Stem ID (Stream ID) to read. Defaults to `None`, which reads all available stems. start : float Start position (seek) in seconds, defaults to 0. duration : float Read `duration` seconds. End position then is `start + duration`. Defaults to `None`: read till the end. info : object provide info object, useful if read_stems is called frequently on file with same configuration (#streams, #channels, samplerate). Returns ------- stems : array_like The tensor of Matrix of stems. The data shape is formatted as :code:`stems x channels x samples`. Notes ----- Input is expected to be in 16bit/44.1 kHz ### Response: def read_stems( filename, out_type=np.float_, stem_id=None, start=0, duration=None, info=None ): """Read STEMS format into numpy Tensor Parameters ---------- filename : str Filename of STEMS format. Typically `filename.stem.mp4`. out_type : type Output type. Defaults to 32bit float aka `np.float32`. stem_id : int Stem ID (Stream ID) to read. Defaults to `None`, which reads all available stems. start : float Start position (seek) in seconds, defaults to 0. duration : float Read `duration` seconds. End position then is `start + duration`. Defaults to `None`: read till the end. info : object provide info object, useful if read_stems is called frequently on file with same configuration (#streams, #channels, samplerate). Returns ------- stems : array_like The tensor of Matrix of stems. The data shape is formatted as :code:`stems x channels x samples`. 
Notes ----- Input is expected to be in 16bit/44.1 kHz """ if info is None: FFinfo = Info(filename) else: FFinfo = info if stem_id is not None: substreams = stem_id else: substreams = FFinfo.audio_stream_idx() if not isinstance(substreams, list): substreams = [substreams] stems = [] tmps = [ tmp.NamedTemporaryFile(delete=False, suffix='.wav') for t in substreams ] for tmp_id, stem in enumerate(substreams): rate = FFinfo.rate(stem) channels = FFinfo.channels(stem) cmd = [ 'ffmpeg', '-y', '-vn', '-i', filename, '-map', '0:' + str(stem), '-acodec', 'pcm_s16le', '-ar', str(rate), '-ac', str(channels), '-loglevel', 'error', tmps[tmp_id].name ] if start: cmd.insert(3, '-ss') cmd.insert(4, str(start)) if duration is not None: cmd.insert(-1, '-t') cmd.insert(-1, str(duration)) sp.call(cmd) # read wav files audio, rate = sf.read(tmps[tmp_id].name) tmps[tmp_id].close() os.remove(tmps[tmp_id].name) stems.append(audio) # check if all stems have the same duration stem_durations = np.array([t.shape[0] for t in stems]) if not (stem_durations == stem_durations[0]).all(): warnings.warn("Warning.......Stems differ in length and were shortend") min_length = np.min(stem_durations) stems = [t[:min_length, :] for t in stems] stems = np.array(stems) stems = np.squeeze(stems).astype(out_type) return stems, rate
def run_terraform_init(tf_bin, # pylint: disable=too-many-arguments module_path, backend_options, env_name, env_region, env_vars): """Run Terraform init.""" init_cmd = [tf_bin, 'init'] cmd_opts = {'env_vars': env_vars, 'exit_on_error': False} if backend_options.get('config'): LOGGER.info('Using provided backend values "%s"', str(backend_options.get('config'))) cmd_opts['cmd_list'] = init_cmd + get_backend_init_list(backend_options.get('config')) # noqa pylint: disable=line-too-long elif os.path.isfile(os.path.join(module_path, backend_options.get('filename'))): LOGGER.info('Using backend config file %s', backend_options.get('filename')) cmd_opts['cmd_list'] = init_cmd + ['-backend-config=%s' % backend_options.get('filename')] # noqa pylint: disable=line-too-long else: LOGGER.info( "No backend tfvars file found -- looking for one " "of \"%s\" (proceeding with bare 'terraform " "init')", ', '.join(gen_backend_tfvars_files( env_name, env_region))) cmd_opts['cmd_list'] = init_cmd try: run_module_command(**cmd_opts) except subprocess.CalledProcessError as shelloutexc: # An error during initialization can leave things in an inconsistent # state (e.g. backend configured but no providers downloaded). Marking # this with a file so it will be deleted on the next run. if os.path.isdir(os.path.join(module_path, '.terraform')): with open(os.path.join(module_path, '.terraform', FAILED_INIT_FILENAME), 'w') as stream: stream.write('1') sys.exit(shelloutexc.returncode)
Run Terraform init.
Below is the the instruction that describes the task: ### Input: Run Terraform init. ### Response: def run_terraform_init(tf_bin, # pylint: disable=too-many-arguments module_path, backend_options, env_name, env_region, env_vars): """Run Terraform init.""" init_cmd = [tf_bin, 'init'] cmd_opts = {'env_vars': env_vars, 'exit_on_error': False} if backend_options.get('config'): LOGGER.info('Using provided backend values "%s"', str(backend_options.get('config'))) cmd_opts['cmd_list'] = init_cmd + get_backend_init_list(backend_options.get('config')) # noqa pylint: disable=line-too-long elif os.path.isfile(os.path.join(module_path, backend_options.get('filename'))): LOGGER.info('Using backend config file %s', backend_options.get('filename')) cmd_opts['cmd_list'] = init_cmd + ['-backend-config=%s' % backend_options.get('filename')] # noqa pylint: disable=line-too-long else: LOGGER.info( "No backend tfvars file found -- looking for one " "of \"%s\" (proceeding with bare 'terraform " "init')", ', '.join(gen_backend_tfvars_files( env_name, env_region))) cmd_opts['cmd_list'] = init_cmd try: run_module_command(**cmd_opts) except subprocess.CalledProcessError as shelloutexc: # An error during initialization can leave things in an inconsistent # state (e.g. backend configured but no providers downloaded). Marking # this with a file so it will be deleted on the next run. if os.path.isdir(os.path.join(module_path, '.terraform')): with open(os.path.join(module_path, '.terraform', FAILED_INIT_FILENAME), 'w') as stream: stream.write('1') sys.exit(shelloutexc.returncode)
def refresh_signal_handler(self, signo, frame): """ This callback is called when SIGUSR1 signal is received. It updates outputs of all modules by calling their `run` method. Interval modules are updated in separate threads if their interval is above a certain treshold value. This treshold is computed by :func:`compute_treshold_interval` class method. The reasoning is that modules with larger intervals also usually take longer to refresh their output and that their output is not required in 'real time'. This also prevents possible lag when updating all modules in a row. """ if signo != signal.SIGUSR1: return for module in self.modules: if hasattr(module, "interval"): if module.interval > self.treshold_interval: thread = Thread(target=module.run) thread.start() else: module.run() else: module.run() self.async_refresh()
This callback is called when SIGUSR1 signal is received. It updates outputs of all modules by calling their `run` method. Interval modules are updated in separate threads if their interval is above a certain treshold value. This treshold is computed by :func:`compute_treshold_interval` class method. The reasoning is that modules with larger intervals also usually take longer to refresh their output and that their output is not required in 'real time'. This also prevents possible lag when updating all modules in a row.
Below is the the instruction that describes the task: ### Input: This callback is called when SIGUSR1 signal is received. It updates outputs of all modules by calling their `run` method. Interval modules are updated in separate threads if their interval is above a certain treshold value. This treshold is computed by :func:`compute_treshold_interval` class method. The reasoning is that modules with larger intervals also usually take longer to refresh their output and that their output is not required in 'real time'. This also prevents possible lag when updating all modules in a row. ### Response: def refresh_signal_handler(self, signo, frame): """ This callback is called when SIGUSR1 signal is received. It updates outputs of all modules by calling their `run` method. Interval modules are updated in separate threads if their interval is above a certain treshold value. This treshold is computed by :func:`compute_treshold_interval` class method. The reasoning is that modules with larger intervals also usually take longer to refresh their output and that their output is not required in 'real time'. This also prevents possible lag when updating all modules in a row. """ if signo != signal.SIGUSR1: return for module in self.modules: if hasattr(module, "interval"): if module.interval > self.treshold_interval: thread = Thread(target=module.run) thread.start() else: module.run() else: module.run() self.async_refresh()
def forward(self, x): """Feed-forward the model.""" return self.layers(x * (self._filter * self.fs_filter).expand_as(x))
Feed-forward the model.
Below is the the instruction that describes the task: ### Input: Feed-forward the model. ### Response: def forward(self, x): """Feed-forward the model.""" return self.layers(x * (self._filter * self.fs_filter).expand_as(x))
def optimization_updates(self, params, gradients): """ Return updates from optimization. """ updates, free_parameters = optimize_updates(params, gradients, self.config) self.network.free_parameters.extend(free_parameters) logging.info("Added %d free parameters for optimization" % len(free_parameters)) return updates
Return updates from optimization.
Below is the the instruction that describes the task: ### Input: Return updates from optimization. ### Response: def optimization_updates(self, params, gradients): """ Return updates from optimization. """ updates, free_parameters = optimize_updates(params, gradients, self.config) self.network.free_parameters.extend(free_parameters) logging.info("Added %d free parameters for optimization" % len(free_parameters)) return updates
def app_update_state(app_id,state): """ update app state """ try: create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') conn = get_conn() c = conn.cursor() c.execute("UPDATE app SET state='{0}',change_at='{1}' WHERE id='{2}'".format(state, create_at, app_id)) conn.commit() conn.close() print 'UPDATE app %s state to %s succeed!' % (app_id,state) except Exception, e: raise RuntimeError( 'update app %s state to %s failed! %s' % (app_id,state,e) )
update app state
Below is the the instruction that describes the task: ### Input: update app state ### Response: def app_update_state(app_id,state): """ update app state """ try: create_at = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') conn = get_conn() c = conn.cursor() c.execute("UPDATE app SET state='{0}',change_at='{1}' WHERE id='{2}'".format(state, create_at, app_id)) conn.commit() conn.close() print 'UPDATE app %s state to %s succeed!' % (app_id,state) except Exception, e: raise RuntimeError( 'update app %s state to %s failed! %s' % (app_id,state,e) )
def check(source): """Check code.""" compile(source, '<string>', 'exec', dont_inherit=True) reporter = pyflakes.reporter.Reporter(sys.stderr, sys.stderr) pyflakes.api.check(source, filename='<string>', reporter=reporter)
Check code.
Below is the the instruction that describes the task: ### Input: Check code. ### Response: def check(source): """Check code.""" compile(source, '<string>', 'exec', dont_inherit=True) reporter = pyflakes.reporter.Reporter(sys.stderr, sys.stderr) pyflakes.api.check(source, filename='<string>', reporter=reporter)
def ScanForVolumeSystem(self, source_path_spec): """Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found. """ if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW: # It is technically possible to scan for VSS-in-VSS but makes no sense # to do so. return None if source_path_spec.IsVolumeSystemRoot(): return source_path_spec if source_path_spec.type_indicator == ( definitions.TYPE_INDICATOR_APFS_CONTAINER): # TODO: consider changes this when upstream changes have been made. # Currently pyfsapfs does not support reading from a volume as a device. # Also see: https://github.com/log2timeline/dfvfs/issues/332 return None try: type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except (IOError, RuntimeError) as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: return None if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one volume system types.') if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and source_path_spec.type_indicator in [ definitions.TYPE_INDICATOR_TSK_PARTITION]): return None if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS: return path_spec_factory.Factory.NewPathSpec( type_indicators[0], location='/', parent=source_path_spec) return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found.
Below is the the instruction that describes the task: ### Input: Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found. ### Response: def ScanForVolumeSystem(self, source_path_spec): """Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found. """ if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW: # It is technically possible to scan for VSS-in-VSS but makes no sense # to do so. return None if source_path_spec.IsVolumeSystemRoot(): return source_path_spec if source_path_spec.type_indicator == ( definitions.TYPE_INDICATOR_APFS_CONTAINER): # TODO: consider changes this when upstream changes have been made. # Currently pyfsapfs does not support reading from a volume as a device. 
# Also see: https://github.com/log2timeline/dfvfs/issues/332 return None try: type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except (IOError, RuntimeError) as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: return None if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one volume system types.') if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and source_path_spec.type_indicator in [ definitions.TYPE_INDICATOR_TSK_PARTITION]): return None if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS: return path_spec_factory.Factory.NewPathSpec( type_indicators[0], location='/', parent=source_path_spec) return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
def _process_sasl_success(self, stream, element): """Process incoming <sasl:success/> element. [initiating entity only] """ if not self.authenticator: logger.debug("Unexpected SASL response") return False content = element.text if content: data = a2b_base64(content.encode("us-ascii")) else: data = None ret = self.authenticator.finish(data) if isinstance(ret, sasl.Success): logger.debug("SASL authentication succeeded") authzid = ret.properties.get("authzid") if authzid: me = JID(authzid) elif "username" in ret.properties: # FIXME: other rules for server me = JID(ret.properties["username"], stream.peer.domain) else: me = None stream.set_authenticated(me, True) else: logger.debug("SASL authentication failed") raise SASLAuthenticationFailed("Additional success data" " procesing failed") return True
Process incoming <sasl:success/> element. [initiating entity only]
Below is the the instruction that describes the task: ### Input: Process incoming <sasl:success/> element. [initiating entity only] ### Response: def _process_sasl_success(self, stream, element): """Process incoming <sasl:success/> element. [initiating entity only] """ if not self.authenticator: logger.debug("Unexpected SASL response") return False content = element.text if content: data = a2b_base64(content.encode("us-ascii")) else: data = None ret = self.authenticator.finish(data) if isinstance(ret, sasl.Success): logger.debug("SASL authentication succeeded") authzid = ret.properties.get("authzid") if authzid: me = JID(authzid) elif "username" in ret.properties: # FIXME: other rules for server me = JID(ret.properties["username"], stream.peer.domain) else: me = None stream.set_authenticated(me, True) else: logger.debug("SASL authentication failed") raise SASLAuthenticationFailed("Additional success data" " procesing failed") return True
def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A file for overlaying POVRay files generated from NeuroML by NeuroML1ToPOVRay.py with cell activity (e.g. as generated from a neuroConstruct simulation)") parser.add_argument('prefix', type=str, metavar='<network prefix>', help='Prefix for files in PovRay, e.g. use PREFIX is files are PREFIX.pov, PREFIX_net.inc, etc.') parser.add_argument('-activity', action='store_true', default=False, help="If this is specified, overlay network activity (not tested!!)") parser.add_argument('-maxV', type=float, metavar='<maxV>', default=50.0, help='Max voltage for colour scale in mV') parser.add_argument('-minV', type=float, metavar='<minV>', default=-90.0, help='Min voltage for colour scale in mV') parser.add_argument('-startTime', type=float, metavar='<startTime>', default=0, help='Time in ms at which to start overlaying the simulation activity') parser.add_argument('-endTime', type=float, metavar='<endTime>', default=100, help='End time of simulation activity in ms') parser.add_argument('-title', type=str, metavar='<title>', default='Movie generated from neuroConstruct simulation', help='Title for movie') parser.add_argument('-left', type=str, metavar='<left info>', default='', help='Text on left') parser.add_argument('-frames', type=int, metavar='<frames>', default=100, help='Number of frames') parser.add_argument('-name', type=str, metavar='<Movie name>', default='output', help='Movie name') return parser.parse_args()
Parse command-line arguments.
Below is the the instruction that describes the task: ### Input: Parse command-line arguments. ### Response: def process_args(): """ Parse command-line arguments. """ parser = argparse.ArgumentParser(description="A file for overlaying POVRay files generated from NeuroML by NeuroML1ToPOVRay.py with cell activity (e.g. as generated from a neuroConstruct simulation)") parser.add_argument('prefix', type=str, metavar='<network prefix>', help='Prefix for files in PovRay, e.g. use PREFIX is files are PREFIX.pov, PREFIX_net.inc, etc.') parser.add_argument('-activity', action='store_true', default=False, help="If this is specified, overlay network activity (not tested!!)") parser.add_argument('-maxV', type=float, metavar='<maxV>', default=50.0, help='Max voltage for colour scale in mV') parser.add_argument('-minV', type=float, metavar='<minV>', default=-90.0, help='Min voltage for colour scale in mV') parser.add_argument('-startTime', type=float, metavar='<startTime>', default=0, help='Time in ms at which to start overlaying the simulation activity') parser.add_argument('-endTime', type=float, metavar='<endTime>', default=100, help='End time of simulation activity in ms') parser.add_argument('-title', type=str, metavar='<title>', default='Movie generated from neuroConstruct simulation', help='Title for movie') parser.add_argument('-left', type=str, metavar='<left info>', default='', help='Text on left') parser.add_argument('-frames', type=int, metavar='<frames>', default=100, help='Number of frames') parser.add_argument('-name', type=str, metavar='<Movie name>', default='output', help='Movie name') return parser.parse_args()
def get_group(self, group_id): """ Get a single group """ url = self.TEAM_GROUPS_ID_URL % group_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
Get a single group
Below is the the instruction that describes the task: ### Input: Get a single group ### Response: def get_group(self, group_id): """ Get a single group """ url = self.TEAM_GROUPS_ID_URL % group_id connection = Connection(self.token) connection.set_url(self.production, url) return connection.get_request()
def check_token(self, renew=True): """ Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired """ if not self.access_token: raise AttributeError('Access Token Required to Check Token') now = datetime.datetime.now() dec_access_token = jwt.get_unverified_claims(self.access_token) if now > datetime.datetime.fromtimestamp(dec_access_token['exp']): expired = True if renew: self.renew_access_token() else: expired = False return expired
Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired
Below is the the instruction that describes the task: ### Input: Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired ### Response: def check_token(self, renew=True): """ Checks the exp attribute of the access_token and either refreshes the tokens by calling the renew_access_tokens method or does nothing :param renew: bool indicating whether to refresh on expiration :return: bool indicating whether access_token has expired """ if not self.access_token: raise AttributeError('Access Token Required to Check Token') now = datetime.datetime.now() dec_access_token = jwt.get_unverified_claims(self.access_token) if now > datetime.datetime.fromtimestamp(dec_access_token['exp']): expired = True if renew: self.renew_access_token() else: expired = False return expired
def close(self): ''' Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected ''' if self.fetcher is not None: self.shutdown_network() self.finalizer.atexit = False
Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected
Below is the the instruction that describes the task: ### Input: Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected ### Response: def close(self): ''' Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected ''' if self.fetcher is not None: self.shutdown_network() self.finalizer.atexit = False
def send_message(self, message): ''' Send a Bokeh Server protocol message to the connected client. Args: message (Message) : a message to send ''' try: if _message_test_port is not None: _message_test_port.sent.append(message) yield message.send(self) except (WebSocketClosedError, StreamClosedError): # Tornado 4.x may raise StreamClosedError # on_close() is / will be called anyway log.warning("Failed sending message as connection was closed") raise gen.Return(None)
Send a Bokeh Server protocol message to the connected client. Args: message (Message) : a message to send
Below is the the instruction that describes the task: ### Input: Send a Bokeh Server protocol message to the connected client. Args: message (Message) : a message to send ### Response: def send_message(self, message): ''' Send a Bokeh Server protocol message to the connected client. Args: message (Message) : a message to send ''' try: if _message_test_port is not None: _message_test_port.sent.append(message) yield message.send(self) except (WebSocketClosedError, StreamClosedError): # Tornado 4.x may raise StreamClosedError # on_close() is / will be called anyway log.warning("Failed sending message as connection was closed") raise gen.Return(None)
def nvmlDeviceGetBoardId(handle): r""" /** * Retrieves the device boardId from 0-N. * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will * always return those values but they will always be different from each other). * * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param boardId Reference in which to return the device's board ID * * @return * - \ref NVML_SUCCESS if \a boardId has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetBoardId """ c_id = c_uint(); fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId") ret = fn(handle, byref(c_id)) _nvmlCheckReturn(ret) return bytes_to_str(c_id.value)
r""" /** * Retrieves the device boardId from 0-N. * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will * always return those values but they will always be different from each other). * * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param boardId Reference in which to return the device's board ID * * @return * - \ref NVML_SUCCESS if \a boardId has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetBoardId
Below is the the instruction that describes the task: ### Input: r""" /** * Retrieves the device boardId from 0-N. * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will * always return those values but they will always be different from each other). * * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param boardId Reference in which to return the device's board ID * * @return * - \ref NVML_SUCCESS if \a boardId has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetBoardId ### Response: def nvmlDeviceGetBoardId(handle): r""" /** * Retrieves the device boardId from 0-N. * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will * always return those values but they will always be different from each other). 
* * * For Fermi &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param boardId Reference in which to return the device's board ID * * @return * - \ref NVML_SUCCESS if \a boardId has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetBoardId """ c_id = c_uint(); fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId") ret = fn(handle, byref(c_id)) _nvmlCheckReturn(ret) return bytes_to_str(c_id.value)
def try_fields(cls, *names) -> t.Optional[t.Any]: """Return first existing of given class field names.""" for name in names: if hasattr(cls, name): return getattr(cls, name) raise AttributeError((cls, names))
Return first existing of given class field names.
Below is the the instruction that describes the task: ### Input: Return first existing of given class field names. ### Response: def try_fields(cls, *names) -> t.Optional[t.Any]: """Return first existing of given class field names.""" for name in names: if hasattr(cls, name): return getattr(cls, name) raise AttributeError((cls, names))
def HashFile(self, fd, byte_count): """Updates underlying hashers with a given file. Args: fd: A file object that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed. """ while byte_count > 0: buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE) buf = fd.read(buf_size) if not buf: break self.HashBuffer(buf) byte_count -= buf_size
Updates underlying hashers with a given file. Args: fd: A file object that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed.
Below is the the instruction that describes the task: ### Input: Updates underlying hashers with a given file. Args: fd: A file object that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed. ### Response: def HashFile(self, fd, byte_count): """Updates underlying hashers with a given file. Args: fd: A file object that is going to be fed to the hashers. byte_count: A maximum number of bytes that are going to be processed. """ while byte_count > 0: buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE) buf = fd.read(buf_size) if not buf: break self.HashBuffer(buf) byte_count -= buf_size
def __train(self, n_clusters=4): """ Calculate cluster's centroids and standard deviations. If there are at least the number of threshold rows \ then: * Observations will be normalised. * Standard deviations will be returned. * Clusters will be returned. * Centroids are ordered based on their distance from an arbitrary -100, -100 point. If there are not enough Observations, then centroids and standard deviations will be set to the empty list. General strategy: Use numpy.array for calculations. Keep everything in float. Convert arrays back to lists \ at the end. :param n_clusters: the number of clusters :type n_clusters: int """ try: for obs in self.observations: features, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True) # the last column is the observation id normalised_data = whiten(features) # skip any rows that contain just zero values... they create nans first_safe_row = pdkit.utils.non_zero_index(normalised_data) observation_ids = features.tolist() sd = features[first_safe_row] / normalised_data[first_safe_row] # Calculate centroids and sort result centroids_array, _ = kmeans(normalised_data, n_clusters) sorted_centroids = pdkit.utils.centroid_sort(centroids_array) if not self.clusters: self.clusters = [[obs, sd.tolist(), sorted_centroids.tolist()]] else: self.clusters.append([obs, sd.tolist(),sorted_centroids.tolist()]) except IOError as e: ierr = "({}): {}".format(e.errno, e.strerror) logging.error("Error training UPDRS, file not found, I/O error %s", ierr) except ValueError as verr: logging.error("Error training UPDRS ValueError ->%s", verr.message) except: logging.error("Unexpected error on training UPDRS init: %s", sys.exc_info()[0])
Calculate cluster's centroids and standard deviations. If there are at least the number of threshold rows \ then: * Observations will be normalised. * Standard deviations will be returned. * Clusters will be returned. * Centroids are ordered based on their distance from an arbitrary -100, -100 point. If there are not enough Observations, then centroids and standard deviations will be set to the empty list. General strategy: Use numpy.array for calculations. Keep everything in float. Convert arrays back to lists \ at the end. :param n_clusters: the number of clusters :type n_clusters: int
Below is the the instruction that describes the task: ### Input: Calculate cluster's centroids and standard deviations. If there are at least the number of threshold rows \ then: * Observations will be normalised. * Standard deviations will be returned. * Clusters will be returned. * Centroids are ordered based on their distance from an arbitrary -100, -100 point. If there are not enough Observations, then centroids and standard deviations will be set to the empty list. General strategy: Use numpy.array for calculations. Keep everything in float. Convert arrays back to lists \ at the end. :param n_clusters: the number of clusters :type n_clusters: int ### Response: def __train(self, n_clusters=4): """ Calculate cluster's centroids and standard deviations. If there are at least the number of threshold rows \ then: * Observations will be normalised. * Standard deviations will be returned. * Clusters will be returned. * Centroids are ordered based on their distance from an arbitrary -100, -100 point. If there are not enough Observations, then centroids and standard deviations will be set to the empty list. General strategy: Use numpy.array for calculations. Keep everything in float. Convert arrays back to lists \ at the end. :param n_clusters: the number of clusters :type n_clusters: int """ try: for obs in self.observations: features, ids = self.__get_features_for_observation(observation=obs, last_column_is_id=True) # the last column is the observation id normalised_data = whiten(features) # skip any rows that contain just zero values... 
they create nans first_safe_row = pdkit.utils.non_zero_index(normalised_data) observation_ids = features.tolist() sd = features[first_safe_row] / normalised_data[first_safe_row] # Calculate centroids and sort result centroids_array, _ = kmeans(normalised_data, n_clusters) sorted_centroids = pdkit.utils.centroid_sort(centroids_array) if not self.clusters: self.clusters = [[obs, sd.tolist(), sorted_centroids.tolist()]] else: self.clusters.append([obs, sd.tolist(),sorted_centroids.tolist()]) except IOError as e: ierr = "({}): {}".format(e.errno, e.strerror) logging.error("Error training UPDRS, file not found, I/O error %s", ierr) except ValueError as verr: logging.error("Error training UPDRS ValueError ->%s", verr.message) except: logging.error("Unexpected error on training UPDRS init: %s", sys.exc_info()[0])
def encode_request(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(username) client_message.append_str(password) client_message.append_bool(uuid is None) if uuid is not None: client_message.append_str(uuid) client_message.append_bool(owner_uuid is None) if owner_uuid is not None: client_message.append_str(owner_uuid) client_message.append_bool(is_owner_connection) client_message.append_str(client_type) client_message.append_byte(serialization_version) client_message.append_str(client_hazelcast_version) client_message.update_frame_length() return client_message
Encode request into client_message
Below is the the instruction that describes the task: ### Input: Encode request into client_message ### Response: def encode_request(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(username, password, uuid, owner_uuid, is_owner_connection, client_type, serialization_version, client_hazelcast_version)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(username) client_message.append_str(password) client_message.append_bool(uuid is None) if uuid is not None: client_message.append_str(uuid) client_message.append_bool(owner_uuid is None) if owner_uuid is not None: client_message.append_str(owner_uuid) client_message.append_bool(is_owner_connection) client_message.append_str(client_type) client_message.append_byte(serialization_version) client_message.append_str(client_hazelcast_version) client_message.update_frame_length() return client_message
def _generate_barcode_ids(info_iter): """Create unique barcode IDs assigned to sequences """ bc_type = "SampleSheet" barcodes = list(set([x[-1] for x in info_iter])) barcodes.sort() barcode_ids = {} for i, bc in enumerate(barcodes): barcode_ids[bc] = (bc_type, i+1) return barcode_ids
Create unique barcode IDs assigned to sequences
Below is the the instruction that describes the task: ### Input: Create unique barcode IDs assigned to sequences ### Response: def _generate_barcode_ids(info_iter): """Create unique barcode IDs assigned to sequences """ bc_type = "SampleSheet" barcodes = list(set([x[-1] for x in info_iter])) barcodes.sort() barcode_ids = {} for i, bc in enumerate(barcodes): barcode_ids[bc] = (bc_type, i+1) return barcode_ids
def add_on_gpu(self, transforms): """Add some transforms.""" if not isinstance(transforms, list): transforms = [transforms] self.gpu_transforms.extend(transforms or []) return self
Add some transforms.
Below is the the instruction that describes the task: ### Input: Add some transforms. ### Response: def add_on_gpu(self, transforms): """Add some transforms.""" if not isinstance(transforms, list): transforms = [transforms] self.gpu_transforms.extend(transforms or []) return self
def nat_gateway_present(name, subnet_name=None, subnet_id=None, region=None, key=None, keyid=None, profile=None, allocation_id=None): ''' Ensure a nat gateway exists within the specified subnet This function requires boto3. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.nat_gateway_present: - subnet_name: my-subnet name Name of the state subnet_name Name of the subnet within which the nat gateway should exist subnet_id Id of the subnet within which the nat gateway should exist. Either subnet_name or subnet_id must be provided. allocation_id If specified, the elastic IP address referenced by the ID is associated with the gateway. Otherwise, a new allocation_id is created and used. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } r = __salt__['boto_vpc.describe_nat_gateways'](subnet_name=subnet_name, subnet_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile) if not r: if __opts__['test']: msg = 'Nat gateway is set to be created.' ret['comment'] = msg ret['result'] = None return ret r = __salt__['boto_vpc.create_nat_gateway'](subnet_name=subnet_name, subnet_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile, allocation_id=allocation_id) if not r.get('created'): ret['result'] = False ret['comment'] = 'Failed to create nat gateway: {0}.'.format(r['error']['message']) return ret ret['changes']['old'] = {'nat_gateway': None} ret['changes']['new'] = {'nat_gateway': r['id']} ret['comment'] = 'Nat gateway created.' return ret inst = r[0] _id = inst.get('NatGatewayId') ret['comment'] = 'Nat gateway {0} present.'.format(_id) return ret
Ensure a nat gateway exists within the specified subnet This function requires boto3. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.nat_gateway_present: - subnet_name: my-subnet name Name of the state subnet_name Name of the subnet within which the nat gateway should exist subnet_id Id of the subnet within which the nat gateway should exist. Either subnet_name or subnet_id must be provided. allocation_id If specified, the elastic IP address referenced by the ID is associated with the gateway. Otherwise, a new allocation_id is created and used. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
Below is the the instruction that describes the task: ### Input: Ensure a nat gateway exists within the specified subnet This function requires boto3. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.nat_gateway_present: - subnet_name: my-subnet name Name of the state subnet_name Name of the subnet within which the nat gateway should exist subnet_id Id of the subnet within which the nat gateway should exist. Either subnet_name or subnet_id must be provided. allocation_id If specified, the elastic IP address referenced by the ID is associated with the gateway. Otherwise, a new allocation_id is created and used. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ### Response: def nat_gateway_present(name, subnet_name=None, subnet_id=None, region=None, key=None, keyid=None, profile=None, allocation_id=None): ''' Ensure a nat gateway exists within the specified subnet This function requires boto3. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml boto_vpc.nat_gateway_present: - subnet_name: my-subnet name Name of the state subnet_name Name of the subnet within which the nat gateway should exist subnet_id Id of the subnet within which the nat gateway should exist. Either subnet_name or subnet_id must be provided. allocation_id If specified, the elastic IP address referenced by the ID is associated with the gateway. Otherwise, a new allocation_id is created and used. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } r = __salt__['boto_vpc.describe_nat_gateways'](subnet_name=subnet_name, subnet_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile) if not r: if __opts__['test']: msg = 'Nat gateway is set to be created.' ret['comment'] = msg ret['result'] = None return ret r = __salt__['boto_vpc.create_nat_gateway'](subnet_name=subnet_name, subnet_id=subnet_id, region=region, key=key, keyid=keyid, profile=profile, allocation_id=allocation_id) if not r.get('created'): ret['result'] = False ret['comment'] = 'Failed to create nat gateway: {0}.'.format(r['error']['message']) return ret ret['changes']['old'] = {'nat_gateway': None} ret['changes']['new'] = {'nat_gateway': r['id']} ret['comment'] = 'Nat gateway created.' return ret inst = r[0] _id = inst.get('NatGatewayId') ret['comment'] = 'Nat gateway {0} present.'.format(_id) return ret
def lmx_base(): """Transformer on languagemodel_lm1b32k_packed. 50M Params.""" hparams = transformer.transformer_tpu() # sharing is counterproductive when underparameterized hparams.shared_embedding_and_softmax_weights = False # we judge by log-ppl, so label smoothing hurts. hparams.label_smoothing = 0.0 # This makes the batch size on GPU the same as on TPU for a packed problem # with sequence length 256. # TODO(noam): fix the mess that is the data reading pipeline. hparams.max_length = 256 # larger batch since we only have a decoder hparams.batch_size = 4096 # save some memory so we can have a larger model hparams.activation_dtype = "bfloat16" return hparams
Transformer on languagemodel_lm1b32k_packed. 50M Params.
Below is the the instruction that describes the task: ### Input: Transformer on languagemodel_lm1b32k_packed. 50M Params. ### Response: def lmx_base(): """Transformer on languagemodel_lm1b32k_packed. 50M Params.""" hparams = transformer.transformer_tpu() # sharing is counterproductive when underparameterized hparams.shared_embedding_and_softmax_weights = False # we judge by log-ppl, so label smoothing hurts. hparams.label_smoothing = 0.0 # This makes the batch size on GPU the same as on TPU for a packed problem # with sequence length 256. # TODO(noam): fix the mess that is the data reading pipeline. hparams.max_length = 256 # larger batch since we only have a decoder hparams.batch_size = 4096 # save some memory so we can have a larger model hparams.activation_dtype = "bfloat16" return hparams
def prior_sediment_memory(*args, **kwargs): """Get the prior density of sediment memory Returns ------- y : ndarray Array giving the density. x : ndarray Array of Memory (ratio) values over which the density was evaluated. """ # "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)" # PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141 # w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean) # TODO(brews): Check that these stats are correctly translated to scipy.stats distribs. mem_shape = kwargs['mem_strength'] # aka. `mem_shape` mem_mean = kwargs['mem_mean'] x = np.linspace(0, 1, 100) y = stats.beta.pdf(x, a=mem_shape * mem_mean, b=mem_shape * (1 - mem_mean)) return y, x
Get the prior density of sediment memory Returns ------- y : ndarray Array giving the density. x : ndarray Array of Memory (ratio) values over which the density was evaluated.
Below is the the instruction that describes the task: ### Input: Get the prior density of sediment memory Returns ------- y : ndarray Array giving the density. x : ndarray Array of Memory (ratio) values over which the density was evaluated. ### Response: def prior_sediment_memory(*args, **kwargs): """Get the prior density of sediment memory Returns ------- y : ndarray Array giving the density. x : ndarray Array of Memory (ratio) values over which the density was evaluated. """ # "plot the prior for the memory (= accumulation rate varibility between neighbouring depths)" # PlotMemPrior @ Bacon.R ln 114 -> ln 1119 - 1141 # w_a = mem_strength * mem_mean, w_b = mem_strength * (1 - mem_mean) # TODO(brews): Check that these stats are correctly translated to scipy.stats distribs. mem_shape = kwargs['mem_strength'] # aka. `mem_shape` mem_mean = kwargs['mem_mean'] x = np.linspace(0, 1, 100) y = stats.beta.pdf(x, a=mem_shape * mem_mean, b=mem_shape * (1 - mem_mean)) return y, x
def send(self, request, **kwargs): # type: (ClientRequest, Any) -> ClientResponse """Send request object according to configuration. Allowed kwargs are: - session : will override the driver session and use yours. Should NOT be done unless really required. - anything else is sent straight to requests. :param ClientRequest request: The request object to be sent. """ # It's not recommended to provide its own session, and is mostly # to enable some legacy code to plug correctly session = kwargs.pop('session', self.session) try: response = session.request( request.method, request.url, **kwargs) except requests.RequestException as err: msg = "Error occurred in request." raise_with_traceback(ClientRequestError, msg, err) return RequestsClientResponse(request, response)
Send request object according to configuration. Allowed kwargs are: - session : will override the driver session and use yours. Should NOT be done unless really required. - anything else is sent straight to requests. :param ClientRequest request: The request object to be sent.
Below is the the instruction that describes the task: ### Input: Send request object according to configuration. Allowed kwargs are: - session : will override the driver session and use yours. Should NOT be done unless really required. - anything else is sent straight to requests. :param ClientRequest request: The request object to be sent. ### Response: def send(self, request, **kwargs): # type: (ClientRequest, Any) -> ClientResponse """Send request object according to configuration. Allowed kwargs are: - session : will override the driver session and use yours. Should NOT be done unless really required. - anything else is sent straight to requests. :param ClientRequest request: The request object to be sent. """ # It's not recommended to provide its own session, and is mostly # to enable some legacy code to plug correctly session = kwargs.pop('session', self.session) try: response = session.request( request.method, request.url, **kwargs) except requests.RequestException as err: msg = "Error occurred in request." raise_with_traceback(ClientRequestError, msg, err) return RequestsClientResponse(request, response)
def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface.
Below is the the instruction that describes the task: ### Input: Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. ### Response: def create_opengl_context(surface_size=(640, 480)): """Create offscreen OpenGL context and make it current. Users are expected to directly use EGL API in case more advanced context management is required. Args: surface_size: (width, height), size of the offscreen rendering surface. """ egl_display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY) major, minor = egl.EGLint(), egl.EGLint() egl.eglInitialize(egl_display, pointer(major), pointer(minor)) config_attribs = [ egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT, egl.EGL_BLUE_SIZE, 8, egl.EGL_GREEN_SIZE, 8, egl.EGL_RED_SIZE, 8, egl.EGL_DEPTH_SIZE, 24, egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT, egl.EGL_NONE ] config_attribs = (egl.EGLint * len(config_attribs))(*config_attribs) num_configs = egl.EGLint() egl_cfg = egl.EGLConfig() egl.eglChooseConfig(egl_display, config_attribs, pointer(egl_cfg), 1, pointer(num_configs)) width, height = surface_size pbuffer_attribs = [ egl.EGL_WIDTH, width, egl.EGL_HEIGHT, height, egl.EGL_NONE, ] pbuffer_attribs = (egl.EGLint * len(pbuffer_attribs))(*pbuffer_attribs) egl_surf = egl.eglCreatePbufferSurface(egl_display, egl_cfg, pbuffer_attribs) egl.eglBindAPI(egl.EGL_OPENGL_API) egl_context = egl.eglCreateContext(egl_display, egl_cfg, egl.EGL_NO_CONTEXT, None) egl.eglMakeCurrent(egl_display, egl_surf, egl_surf, egl_context)
def transform(x): """ Transform from Timeddelta to numerical format """ # microseconds try: x = np.array([_x.total_seconds()*10**6 for _x in x]) except TypeError: x = x.total_seconds()*10**6 return x
Transform from Timeddelta to numerical format
Below is the the instruction that describes the task: ### Input: Transform from Timeddelta to numerical format ### Response: def transform(x): """ Transform from Timeddelta to numerical format """ # microseconds try: x = np.array([_x.total_seconds()*10**6 for _x in x]) except TypeError: x = x.total_seconds()*10**6 return x
def create(self, unique_name=values.unset, friendly_name=values.unset, identity=values.unset, deployment_sid=values.unset, enabled=values.unset): """ Create a new DeviceInstance :param unicode unique_name: A unique, addressable name of this Device. :param unicode friendly_name: A human readable description for this Device. :param unicode identity: An identifier of the Device user. :param unicode deployment_sid: The unique SID of the Deployment group. :param bool enabled: The enabled :returns: Newly created DeviceInstance :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance """ data = values.of({ 'UniqueName': unique_name, 'FriendlyName': friendly_name, 'Identity': identity, 'DeploymentSid': deployment_sid, 'Enabled': enabled, }) payload = self._version.create( 'POST', self._uri, data=data, ) return DeviceInstance(self._version, payload, fleet_sid=self._solution['fleet_sid'], )
Create a new DeviceInstance :param unicode unique_name: A unique, addressable name of this Device. :param unicode friendly_name: A human readable description for this Device. :param unicode identity: An identifier of the Device user. :param unicode deployment_sid: The unique SID of the Deployment group. :param bool enabled: The enabled :returns: Newly created DeviceInstance :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
Below is the the instruction that describes the task: ### Input: Create a new DeviceInstance :param unicode unique_name: A unique, addressable name of this Device. :param unicode friendly_name: A human readable description for this Device. :param unicode identity: An identifier of the Device user. :param unicode deployment_sid: The unique SID of the Deployment group. :param bool enabled: The enabled :returns: Newly created DeviceInstance :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance ### Response: def create(self, unique_name=values.unset, friendly_name=values.unset, identity=values.unset, deployment_sid=values.unset, enabled=values.unset): """ Create a new DeviceInstance :param unicode unique_name: A unique, addressable name of this Device. :param unicode friendly_name: A human readable description for this Device. :param unicode identity: An identifier of the Device user. :param unicode deployment_sid: The unique SID of the Deployment group. :param bool enabled: The enabled :returns: Newly created DeviceInstance :rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance """ data = values.of({ 'UniqueName': unique_name, 'FriendlyName': friendly_name, 'Identity': identity, 'DeploymentSid': deployment_sid, 'Enabled': enabled, }) payload = self._version.create( 'POST', self._uri, data=data, ) return DeviceInstance(self._version, payload, fleet_sid=self._solution['fleet_sid'], )
def _unregister_all_factories(self): """ Unregisters all factories. This method should be called only after the iPOPO service has been unregistered (that's why it's not locked) """ factories = list(self.__factories.keys()) for factory_name in factories: self.unregister_factory(factory_name)
Unregisters all factories. This method should be called only after the iPOPO service has been unregistered (that's why it's not locked)
Below is the the instruction that describes the task: ### Input: Unregisters all factories. This method should be called only after the iPOPO service has been unregistered (that's why it's not locked) ### Response: def _unregister_all_factories(self): """ Unregisters all factories. This method should be called only after the iPOPO service has been unregistered (that's why it's not locked) """ factories = list(self.__factories.keys()) for factory_name in factories: self.unregister_factory(factory_name)
def _empty_cache(self, termlist=None): """Empty the cache associated with each `Term` instance. This method is called when merging Ontologies or including new terms in the Ontology to make sure the cache of each term is cleaned and avoid returning wrong memoized values (such as Term.rchildren() TermLists, which get memoized for performance concerns) """ if termlist is None: for term in six.itervalues(self.terms): term._empty_cache() else: for term in termlist: try: self.terms[term.id]._empty_cache() except AttributeError: self.terms[term]._empty_cache()
Empty the cache associated with each `Term` instance. This method is called when merging Ontologies or including new terms in the Ontology to make sure the cache of each term is cleaned and avoid returning wrong memoized values (such as Term.rchildren() TermLists, which get memoized for performance concerns)
Below is the the instruction that describes the task: ### Input: Empty the cache associated with each `Term` instance. This method is called when merging Ontologies or including new terms in the Ontology to make sure the cache of each term is cleaned and avoid returning wrong memoized values (such as Term.rchildren() TermLists, which get memoized for performance concerns) ### Response: def _empty_cache(self, termlist=None): """Empty the cache associated with each `Term` instance. This method is called when merging Ontologies or including new terms in the Ontology to make sure the cache of each term is cleaned and avoid returning wrong memoized values (such as Term.rchildren() TermLists, which get memoized for performance concerns) """ if termlist is None: for term in six.itervalues(self.terms): term._empty_cache() else: for term in termlist: try: self.terms[term.id]._empty_cache() except AttributeError: self.terms[term]._empty_cache()
def GetRootFileEntry(self): """Retrieves the root file entry. Returns: GzipFileEntry: a file entry or None if not available. """ path_spec = gzip_path_spec.GzipPathSpec(parent=self._path_spec.parent) return self.GetFileEntryByPathSpec(path_spec)
Retrieves the root file entry. Returns: GzipFileEntry: a file entry or None if not available.
Below is the the instruction that describes the task: ### Input: Retrieves the root file entry. Returns: GzipFileEntry: a file entry or None if not available. ### Response: def GetRootFileEntry(self): """Retrieves the root file entry. Returns: GzipFileEntry: a file entry or None if not available. """ path_spec = gzip_path_spec.GzipPathSpec(parent=self._path_spec.parent) return self.GetFileEntryByPathSpec(path_spec)
def user(request, username): """ ``User`` focused activity stream. (Eg: Profile page twitter.com/justquick) """ instance = get_object_or_404( USER_MODEL, **{'is_active': True, username_field: username} ) return render( request, 'actstream/actor.html', context={ 'ctype': ContentType.objects.get_for_model(USER_MODEL), 'actor': instance, 'action_list': models.user_stream(instance) } )
``User`` focused activity stream. (Eg: Profile page twitter.com/justquick)
Below is the the instruction that describes the task: ### Input: ``User`` focused activity stream. (Eg: Profile page twitter.com/justquick) ### Response: def user(request, username): """ ``User`` focused activity stream. (Eg: Profile page twitter.com/justquick) """ instance = get_object_or_404( USER_MODEL, **{'is_active': True, username_field: username} ) return render( request, 'actstream/actor.html', context={ 'ctype': ContentType.objects.get_for_model(USER_MODEL), 'actor': instance, 'action_list': models.user_stream(instance) } )
def to_dict(self): '''Return a dict of the attributes.''' return dict( raw=self.raw, scheme=self.scheme, authority=self.authority, netloc=self.authority, path=self.path, query=self.query, fragment=self.fragment, userinfo=self.userinfo, username=self.username, password=self.password, host=self.host, hostname=self.hostname, port=self.port, resource=self.resource, url=self.url, encoding=self.encoding, )
Return a dict of the attributes.
Below is the the instruction that describes the task: ### Input: Return a dict of the attributes. ### Response: def to_dict(self): '''Return a dict of the attributes.''' return dict( raw=self.raw, scheme=self.scheme, authority=self.authority, netloc=self.authority, path=self.path, query=self.query, fragment=self.fragment, userinfo=self.userinfo, username=self.username, password=self.password, host=self.host, hostname=self.hostname, port=self.port, resource=self.resource, url=self.url, encoding=self.encoding, )
def po_to_unicode(po_obj): """ Turns a polib :class:`polib.PoFile` or a :class:`polib.PoEntry` into a :class:`unicode` string. :param po_obj: Either a :class:`polib.PoFile` or :class:`polib.PoEntry`. :rtype: :class:`unicode` string. """ po_text = po_obj.__str__() if type(po_text) != types.UnicodeType: po_text = po_text.decode('utf-8') return po_text
Turns a polib :class:`polib.PoFile` or a :class:`polib.PoEntry` into a :class:`unicode` string. :param po_obj: Either a :class:`polib.PoFile` or :class:`polib.PoEntry`. :rtype: :class:`unicode` string.
Below is the the instruction that describes the task: ### Input: Turns a polib :class:`polib.PoFile` or a :class:`polib.PoEntry` into a :class:`unicode` string. :param po_obj: Either a :class:`polib.PoFile` or :class:`polib.PoEntry`. :rtype: :class:`unicode` string. ### Response: def po_to_unicode(po_obj): """ Turns a polib :class:`polib.PoFile` or a :class:`polib.PoEntry` into a :class:`unicode` string. :param po_obj: Either a :class:`polib.PoFile` or :class:`polib.PoEntry`. :rtype: :class:`unicode` string. """ po_text = po_obj.__str__() if type(po_text) != types.UnicodeType: po_text = po_text.decode('utf-8') return po_text
def persist(name, value, config='/etc/sysctl.conf'):
    '''
    Assign and persist a simple sysctl parameter for this minion

    name
        The sysctl parameter name, e.g. ``net.inet.icmp.icmplim``
    value
        The value to assign and persist
    config
        The sysctl configuration file to edit (default ``/etc/sysctl.conf``)

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.persist net.inet.icmp.icmplim 50
    '''
    nlines = []
    edited = False
    value = six.text_type(value)

    # create /etc/sysctl.conf if not present
    if not os.path.isfile(config):
        try:
            with salt.utils.files.fopen(config, 'w+'):
                pass
        except (IOError, OSError):
            msg = 'Could not create {0}'
            raise CommandExecutionError(msg.format(config))

    with salt.utils.files.fopen(config, 'r') as ifile:
        for line in ifile:
            line = salt.utils.stringutils.to_unicode(line)
            # Bug fix: escape the parameter name before interpolating it
            # into the regex. sysctl names contain dots, which are regex
            # wildcards, so previously e.g. 'net.foo' also matched a
            # line 'netXfoo=...'. '\??=' also accepts the FreeBSD-style
            # '?=' assignment operator.
            m = re.match(r'{0}(\??=)'.format(re.escape(name)), line)
            if not m:
                nlines.append(line)
                continue
            else:
                key, rest = line.split('=', 1)
                # Preserve quoting style and any trailing text after the
                # existing value so comments etc. survive the rewrite.
                if rest.startswith('"'):
                    _, rest_v, rest = rest.split('"', 2)
                elif rest.startswith('\''):
                    _, rest_v, rest = rest.split('\'', 2)
                else:
                    rest_v = rest.split()[0]
                    rest = rest[len(rest_v):]
                if rest_v == value:
                    return 'Already set'
                new_line = '{0}{1}{2}{3}'.format(name, m.group(1), value, rest)
                nlines.append(new_line)
                edited = True

    if not edited:
        newline = '{0}={1}'.format(name, value)
        nlines.append("{0}\n".format(newline))

    with salt.utils.files.fopen(config, 'wb') as ofile:
        ofile.writelines(
            salt.utils.data.encode(nlines)
        )

    # Apply the value to the running kernel as well.
    assign(name, value)
    return 'Updated'
Assign and persist a simple sysctl parameter for this minion CLI Example: .. code-block:: bash salt '*' sysctl.persist net.inet.icmp.icmplim 50
Below is the the instruction that describes the task: ### Input: Assign and persist a simple sysctl parameter for this minion CLI Example: .. code-block:: bash salt '*' sysctl.persist net.inet.icmp.icmplim 50 ### Response: def persist(name, value, config='/etc/sysctl.conf'): ''' Assign and persist a simple sysctl parameter for this minion CLI Example: .. code-block:: bash salt '*' sysctl.persist net.inet.icmp.icmplim 50 ''' nlines = [] edited = False value = six.text_type(value) # create /etc/sysctl.conf if not present if not os.path.isfile(config): try: with salt.utils.files.fopen(config, 'w+'): pass except (IOError, OSError): msg = 'Could not create {0}' raise CommandExecutionError(msg.format(config)) with salt.utils.files.fopen(config, 'r') as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) m = re.match(r'{0}(\??=)'.format(name), line) if not m: nlines.append(line) continue else: key, rest = line.split('=', 1) if rest.startswith('"'): _, rest_v, rest = rest.split('"', 2) elif rest.startswith('\''): _, rest_v, rest = rest.split('\'', 2) else: rest_v = rest.split()[0] rest = rest[len(rest_v):] if rest_v == value: return 'Already set' new_line = '{0}{1}{2}{3}'.format(name, m.group(1), value, rest) nlines.append(new_line) edited = True if not edited: newline = '{0}={1}'.format(name, value) nlines.append("{0}\n".format(newline)) with salt.utils.files.fopen(config, 'wb') as ofile: ofile.writelines( salt.utils.data.encode(nlines) ) assign(name, value) return 'Updated'
async def scale(self, scale=None, scale_change=None):
    """
    Set or adjust the scale of this (K8s) application.

    One or the other of scale or scale_change must be provided.

    :param int scale: Scale to which to set this application.
    :param int scale_change: Amount by which to adjust the scale of this
        application (can be positive or negative).
    """
    facade = client.ApplicationFacade.from_connection(self.connection)

    if scale is None and scale_change is None:
        raise ValueError('Must provide either scale or scale_change')

    log.debug(
        'Scaling application %s %s %s',
        self.name, 'to' if scale else 'by', scale or scale_change)

    param = client.ScaleApplicationParam(application_tag=self.tag,
                                         scale=scale,
                                         scale_change=scale_change)
    await facade.ScaleApplications([param])
Set or adjust the scale of this (K8s) application. One or the other of scale or scale_change must be provided. :param int scale: Scale to which to set this application. :param int scale_change: Amount by which to adjust the scale of this application (can be positive or negative).
Below is the the instruction that describes the task: ### Input: Set or adjust the scale of this (K8s) application. One or the other of scale or scale_change must be provided. :param int scale: Scale to which to set this application. :param int scale_change: Amount by which to adjust the scale of this application (can be positive or negative). ### Response: async def scale(self, scale=None, scale_change=None): """ Set or adjust the scale of this (K8s) application. One or the other of scale or scale_change must be provided. :param int scale: Scale to which to set this application. :param int scale_change: Amount by which to adjust the scale of this application (can be positive or negative). """ app_facade = client.ApplicationFacade.from_connection(self.connection) if (scale, scale_change) == (None, None): raise ValueError('Must provide either scale or scale_change') log.debug( 'Scaling application %s %s %s', self.name, 'to' if scale else 'by', scale or scale_change) await app_facade.ScaleApplications([ client.ScaleApplicationParam(application_tag=self.tag, scale=scale, scale_change=scale_change) ])
def builder_types(cls) -> List[Type[ParameterBuilder]]:
    """Define the available builder types.

    Loads every builder class registered under the ``ENTRY_POINT``
    entry-point group.
    """
    loaded = (
        entry_point.load()
        for entry_point in iter_entry_points(ENTRY_POINT)
    )
    return list(loaded)
Define the available builder types.
Below is the the instruction that describes the task: ### Input: Define the available builder types. ### Response: def builder_types(cls) -> List[Type[ParameterBuilder]]: """ Define the available builder types. """ return [ entry_point.load() for entry_point in iter_entry_points(ENTRY_POINT) ]
def log_entry_encode(self, id, num_logs, last_log_num, time_utc, size):
    '''
    Reply to LOG_REQUEST_LIST

    Thin factory wrapper: packs the arguments into a
    MAVLink_log_entry_message without validating ranges; callers are
    expected to pass values that fit the MAVLink field widths below.

    id           : Log id (uint16_t)
    num_logs     : Total number of logs (uint16_t)
    last_log_num : High log number (uint16_t)
    time_utc     : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t)
    size         : Size of the log (may be approximate) in bytes (uint32_t)

    '''
    # `id` shadows the builtin, but the name is part of the generated
    # MAVLink API and must stay as-is.
    return MAVLink_log_entry_message(id, num_logs, last_log_num, time_utc, size)
Reply to LOG_REQUEST_LIST id : Log id (uint16_t) num_logs : Total number of logs (uint16_t) last_log_num : High log number (uint16_t) time_utc : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t) size : Size of the log (may be approximate) in bytes (uint32_t)
Below is the the instruction that describes the task: ### Input: Reply to LOG_REQUEST_LIST id : Log id (uint16_t) num_logs : Total number of logs (uint16_t) last_log_num : High log number (uint16_t) time_utc : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t) size : Size of the log (may be approximate) in bytes (uint32_t) ### Response: def log_entry_encode(self, id, num_logs, last_log_num, time_utc, size): ''' Reply to LOG_REQUEST_LIST id : Log id (uint16_t) num_logs : Total number of logs (uint16_t) last_log_num : High log number (uint16_t) time_utc : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t) size : Size of the log (may be approximate) in bytes (uint32_t) ''' return MAVLink_log_entry_message(id, num_logs, last_log_num, time_utc, size)
def start_monitor(redis_address,
                  stdout_file=None,
                  stderr_file=None,
                  autoscaling_config=None,
                  redis_password=None):
    """Run a process to monitor the other processes.

    Args:
        redis_address (str): The address that the Redis server is
            listening on.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        autoscaling_config: path to autoscaling config file.
        redis_password (str): The password of the redis server.

    Returns:
        ProcessInfo for the process that was started.
    """
    monitor_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "monitor.py")
    # Optional flags are appended only when their values were supplied.
    command = [sys.executable, "-u", monitor_path,
               "--redis-address=" + str(redis_address)]
    if autoscaling_config:
        command += ["--autoscaling-config=" + str(autoscaling_config)]
    if redis_password:
        command += ["--redis-password=" + redis_password]
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
Run a process to monitor the other processes. Args: redis_address (str): The address that the Redis server is listening on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. autoscaling_config: path to autoscaling config file. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started.
Below is the the instruction that describes the task: ### Input: Run a process to monitor the other processes. Args: redis_address (str): The address that the Redis server is listening on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. autoscaling_config: path to autoscaling config file. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. ### Response: def start_monitor(redis_address, stdout_file=None, stderr_file=None, autoscaling_config=None, redis_password=None): """Run a process to monitor the other processes. Args: redis_address (str): The address that the Redis server is listening on. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. autoscaling_config: path to autoscaling config file. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. """ monitor_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "monitor.py") command = [ sys.executable, "-u", monitor_path, "--redis-address=" + str(redis_address) ] if autoscaling_config: command.append("--autoscaling-config=" + str(autoscaling_config)) if redis_password: command.append("--redis-password=" + redis_password) process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_MONITOR, stdout_file=stdout_file, stderr_file=stderr_file) return process_info
def query(self, wql):
    """Connect by wmi and run wql.

    Runs the given WQL query against the remote host via the ``wmic``
    command-line client and parses its delimited output.

    :param wql: WQL query string to execute.
    :returns: list of dicts, one per result row, keyed by column header;
        or ``None`` when the subprocess fails (``self.unknown`` is
        called and nothing is returned from the except branch).
    """
    try:
        # NOTE(review): the password is embedded in the wmic argument
        # list, so it is visible in the local process list while the
        # command runs.
        self.__wql = ['wmic', '-U', self.args.domain + '\\' + self.args.user + '%' + self.args.password, '//' + self.args.host, '--namespace', self.args.namespace, '--delimiter', self.args.delimiter, wql]
        self.logger.debug("wql: {}".format(self.__wql))
        self.__output = subprocess.check_output(self.__wql)
        self.logger.debug("output: {}".format(self.__output))
        self.logger.debug("wmi connect succeed.")
        # Drop the first output line (presumably a banner line emitted
        # by wmic before the header row) -- TODO confirm against real
        # wmic output.
        self.__wmi_output = self.__output.splitlines()[1:]
        self.logger.debug("wmi_output: {}".format(self.__wmi_output))
        # NOTE(review): the reader is hard-coded to '|' even though the
        # command passed self.args.delimiter above -- verify these
        # always match.
        self.__csv_header = csv.DictReader(self.__wmi_output, delimiter='|')
        self.logger.debug("csv_header: {}".format(self.__csv_header))
        return list(self.__csv_header)
    except subprocess.CalledProcessError as e:
        self.unknown("Connect by wmi and run wql error: %s" % e)
Connect by wmi and run wql.
Below is the the instruction that describes the task: ### Input: Connect by wmi and run wql. ### Response: def query(self, wql): """Connect by wmi and run wql.""" try: self.__wql = ['wmic', '-U', self.args.domain + '\\' + self.args.user + '%' + self.args.password, '//' + self.args.host, '--namespace', self.args.namespace, '--delimiter', self.args.delimiter, wql] self.logger.debug("wql: {}".format(self.__wql)) self.__output = subprocess.check_output(self.__wql) self.logger.debug("output: {}".format(self.__output)) self.logger.debug("wmi connect succeed.") self.__wmi_output = self.__output.splitlines()[1:] self.logger.debug("wmi_output: {}".format(self.__wmi_output)) self.__csv_header = csv.DictReader(self.__wmi_output, delimiter='|') self.logger.debug("csv_header: {}".format(self.__csv_header)) return list(self.__csv_header) except subprocess.CalledProcessError as e: self.unknown("Connect by wmi and run wql error: %s" % e)
def get_recirc_content(self, published=True, count=3):
    """Get the first `count` (default 3) content objects in the
    query's `included_ids`, falling back to a tag-boosted search
    (excluding this object itself) when no ids are set.

    :param published: restrict the custom search to published content.
    :param count: maximum number of content objects to return.
    """
    query = self.get_query()

    # check if query has included_ids & if there are any ids in it,
    # in case the ids have been removed from the array
    if not query.get('included_ids'):
        # No explicit ids: rank all other content by shared tag slugs
        # and exclude this object's own id from the results.
        qs = Content.search_objects.search()
        qs = qs.query(
            TagBoost(slugs=self.tags.values_list("slug", flat=True))
        ).filter(
            ~Ids(values=[self.id])
        ).sort(
            "_score"
        )
        # NOTE(review): this fallback branch does not apply the
        # `published` filter -- confirm that is intended.
        return qs[:count]

    # NOTE: set included_ids to just be the first `count` ids,
    # otherwise search will return last 3 items
    query['included_ids'] = query['included_ids'][:count]

    search = custom_search_model(Content, query, published=published,
                                 field_map={
                                     "feature_type": "feature_type.slug",
                                     "tag": "tags.slug",
                                     "content-type": "_type"
                                 })
    return search
gets the first 3 content objects in the `included_ids`
Below is the the instruction that describes the task: ### Input: gets the first 3 content objects in the `included_ids` ### Response: def get_recirc_content(self, published=True, count=3): """gets the first 3 content objects in the `included_ids` """ query = self.get_query() # check if query has included_ids & if there are any ids in it, # in case the ids have been removed from the array if not query.get('included_ids'): qs = Content.search_objects.search() qs = qs.query( TagBoost(slugs=self.tags.values_list("slug", flat=True)) ).filter( ~Ids(values=[self.id]) ).sort( "_score" ) return qs[:count] # NOTE: set included_ids to just be the first 3 ids, # otherwise search will return last 3 items query['included_ids'] = query['included_ids'][:count] search = custom_search_model(Content, query, published=published, field_map={ "feature_type": "feature_type.slug", "tag": "tags.slug", "content-type": "_type" }) return search
def get_known_periods(self):
    """Get the list of periods the variable value is known for.

    Memory-storage periods come first, followed by disk-storage
    periods when a disk storage is configured.
    """
    periods = list(self._memory_storage.get_known_periods())
    if self._disk_storage:
        periods.extend(self._disk_storage.get_known_periods())
    return periods
Get the list of periods the variable value is known for.
Below is the the instruction that describes the task: ### Input: Get the list of periods the variable value is known for. ### Response: def get_known_periods(self): """ Get the list of periods the variable value is known for. """ return list(self._memory_storage.get_known_periods()) + list(( self._disk_storage.get_known_periods() if self._disk_storage else []))
def parseprint(code, filename="<string>", mode="exec", **kwargs):
    """Parse some code from a string and pretty-print it.

    :param code: source text to parse.
    :param filename: filename reported in syntax errors.
    :param mode: compile mode ('exec', 'eval' or 'single').
    :param kwargs: passed through to ``dump``.
    """
    # Bug fix: `filename` was accepted but never forwarded, so syntax
    # errors always reported the default "<string>".
    node = parse(code, filename, mode=mode)  # An ode to the code
    print(dump(node, **kwargs))
Parse some code from a string and pretty-print it.
Below is the the instruction that describes the task: ### Input: Parse some code from a string and pretty-print it. ### Response: def parseprint(code, filename="<string>", mode="exec", **kwargs): """Parse some code from a string and pretty-print it.""" node = parse(code, mode=mode) # An ode to the code print(dump(node, **kwargs))
def _restructure_if_volume_follows_journal(left, right):
    """Remove volume node if it follows a journal logically in the tree hierarchy.

    Args:
        left (ast.ASTElement): The journal KeywordOp node.
        right (ast.ASTElement): The rest of the tree to be restructured.

    Return:
        (ast.ASTElement): The restructured tree, with the volume node removed.

    Notes:
        This happens to support queries like "journal Phys.Rev. and vol d85".
        Appends the value of KeywordOp with Keyword 'volume' and discards 'volume' KeywordOp node from the tree.
    """
    def _get_volume_keyword_op_and_remaining_subtree(right_subtree):
        # Returns a (volume KeywordOp or None, remaining subtree or None)
        # pair for the four recognized shapes. Any other shape falls
        # through, implicitly returning None, which the caller treats as
        # "do not restructure".
        if isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) \
                and right_subtree.op.left == Keyword('volume'):
            # Bare "not volume:X": nothing to merge, nothing remains.
            return None, None
        elif isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) \
                and isinstance(right_subtree.left.op, KeywordOp) and right_subtree.left.op.left == Keyword('volume'):
            # "not volume:X and <rest>": keep only <rest>.
            return None, right_subtree.right
        elif isinstance(right_subtree, KeywordOp) and right_subtree.left == Keyword('volume'):
            # Bare "volume:X".
            return right_subtree, None
        elif isinstance(right_subtree, AndOp) and right_subtree.left.left == Keyword('volume'):
            # "volume:X and <rest>".
            # NOTE(review): this branch assumes right_subtree.left is a
            # KeywordOp (has a .left attribute) -- other node types
            # would raise AttributeError; confirm the parser guarantees
            # this shape.
            return right_subtree.left, right_subtree.right

    journal_value = left.right.value

    volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right)
    if not volume_and_remaining_subtree:
        return

    volume_node, remaining_subtree = volume_and_remaining_subtree
    if volume_node:
        # Fold the volume into the journal value, e.g. "Phys.Rev.,d85".
        left.right.value = ','.join([journal_value, volume_node.right.value])

    return AndOp(left, remaining_subtree) if remaining_subtree else left
Remove volume node if it follows a journal logically in the tree hierarchy. Args: left (ast.ASTElement): The journal KeywordOp node. right (ast.ASTElement): The rest of the tree to be restructured. Return: (ast.ASTElement): The restructured tree, with the volume node removed. Notes: This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with Keyword 'volume' and discards 'volume' KeywordOp node from the tree.
Below is the the instruction that describes the task: ### Input: Remove volume node if it follows a journal logically in the tree hierarchy. Args: left (ast.ASTElement): The journal KeywordOp node. right (ast.ASTElement): The rest of the tree to be restructured. Return: (ast.ASTElement): The restructured tree, with the volume node removed. Notes: This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with Keyword 'volume' and discards 'volume' KeywordOp node from the tree. ### Response: def _restructure_if_volume_follows_journal(left, right): """Remove volume node if it follows a journal logically in the tree hierarchy. Args: left (ast.ASTElement): The journal KeywordOp node. right (ast.ASTElement): The rest of the tree to be restructured. Return: (ast.ASTElement): The restructured tree, with the volume node removed. Notes: This happens to support queries like "journal Phys.Rev. and vol d85". Appends the value of KeywordOp with Keyword 'volume' and discards 'volume' KeywordOp node from the tree. 
""" def _get_volume_keyword_op_and_remaining_subtree(right_subtree): if isinstance(right_subtree, NotOp) and isinstance(right_subtree.op, KeywordOp) \ and right_subtree.op.left == Keyword('volume'): return None, None elif isinstance(right_subtree, AndOp) and isinstance(right_subtree.left, NotOp) \ and isinstance(right_subtree.left.op, KeywordOp) and right_subtree.left.op.left == Keyword('volume'): return None, right_subtree.right elif isinstance(right_subtree, KeywordOp) and right_subtree.left == Keyword('volume'): return right_subtree, None elif isinstance(right_subtree, AndOp) and right_subtree.left.left == Keyword('volume'): return right_subtree.left, right_subtree.right journal_value = left.right.value volume_and_remaining_subtree = _get_volume_keyword_op_and_remaining_subtree(right) if not volume_and_remaining_subtree: return volume_node, remaining_subtree = volume_and_remaining_subtree if volume_node: left.right.value = ','.join([journal_value, volume_node.right.value]) return AndOp(left, remaining_subtree) if remaining_subtree else left
def scan_recurse(self, node, path=()):
    """Do a recursive scan of the top level target file.

    This lets us search for included files based on the directory of
    the main file just as latex does.

    Returns the dependency nodes sorted by self.sort_key (per the DSU
    note below, the raw include name).
    """
    path_dict = dict(list(path))

    # Work list of (inc_type, inc_subdir, inc_filename) tuples,
    # processed stack-style (pop from the end), seeded from the root
    # node's own includes.
    queue = []
    queue.extend( self.scan(node) )
    seen = {}

    # This is a hand-coded DSU (decorate-sort-undecorate, or
    # Schwartzian transform) pattern.  The sort key is the raw name
    # of the file as specifed on the \include, \input, etc. line.
    # TODO: what about the comment in the original Classic scanner:
    # """which lets
    # us keep the sort order constant regardless of whether the file
    # is actually found in a Repository or locally."""
    nodes = []
    source_dir = node.get_dir()
    #for include in includes:
    while queue:

        include = queue.pop()
        inc_type, inc_subdir, inc_filename = include

        # Skip filenames we have already processed (EAFP: first sight
        # raises KeyError and records the name).
        try:
            if seen[inc_filename] == 1:
                continue
        except KeyError:
            seen[inc_filename] = 1

        #
        # Handle multiple filenames in include[1]
        #
        n, i = self.find_include(include, source_dir, path_dict)
        if n is None:
            # Do not bother with 'usepackage' warnings, as they most
            # likely refer to system-level files
            if inc_type != 'usepackage':
                SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
                                    "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
        else:
            sortkey = self.sort_key(n)
            nodes.append((sortkey, n))
            # recurse down
            queue.extend( self.scan(n, inc_subdir) )

    return [pair[1] for pair in sorted(nodes)]
do a recursive scan of the top level target file This lets us search for included files based on the directory of the main file just as latex does
Below is the the instruction that describes the task: ### Input: do a recursive scan of the top level target file This lets us search for included files based on the directory of the main file just as latex does ### Response: def scan_recurse(self, node, path=()): """ do a recursive scan of the top level target file This lets us search for included files based on the directory of the main file just as latex does""" path_dict = dict(list(path)) queue = [] queue.extend( self.scan(node) ) seen = {} # This is a hand-coded DSU (decorate-sort-undecorate, or # Schwartzian transform) pattern. The sort key is the raw name # of the file as specifed on the \include, \input, etc. line. # TODO: what about the comment in the original Classic scanner: # """which lets # us keep the sort order constant regardless of whether the file # is actually found in a Repository or locally.""" nodes = [] source_dir = node.get_dir() #for include in includes: while queue: include = queue.pop() inc_type, inc_subdir, inc_filename = include try: if seen[inc_filename] == 1: continue except KeyError: seen[inc_filename] = 1 # # Handle multiple filenames in include[1] # n, i = self.find_include(include, source_dir, path_dict) if n is None: # Do not bother with 'usepackage' warnings, as they most # likely refer to system-level files if inc_type != 'usepackage': SCons.Warnings.warn(SCons.Warnings.DependencyWarning, "No dependency generated for file: %s (included from: %s) -- file not found" % (i, node)) else: sortkey = self.sort_key(n) nodes.append((sortkey, n)) # recurse down queue.extend( self.scan(n, inc_subdir) ) return [pair[1] for pair in sorted(nodes)]
def with_stmt_handle(self, tokens):
    """Process with statements."""
    internal_assert(len(tokens) == 2, "invalid with statement tokens", tokens)
    withs, body = tokens
    # A single context manager, or a target that supports multi-context
    # `with` (2.7+), compiles to one statement.
    if len(withs) == 1 or self.target_info >= (2, 7):
        return "with " + ", ".join(withs) + body
    # Otherwise nest one `with` per context manager, opening an indent
    # level for each outer one and closing them all after the body.
    parts = []
    for expr in withs[:-1]:
        parts.append("with " + expr + ":\n" + openindent)
    parts.append("with " + withs[-1] + body)
    parts.append(closeindent * (len(withs) - 1))
    return "".join(parts)
Process with statements.
Below is the the instruction that describes the task: ### Input: Process with statements. ### Response: def with_stmt_handle(self, tokens): """Process with statements.""" internal_assert(len(tokens) == 2, "invalid with statement tokens", tokens) withs, body = tokens if len(withs) == 1 or self.target_info >= (2, 7): return "with " + ", ".join(withs) + body else: return ( "".join("with " + expr + ":\n" + openindent for expr in withs[:-1]) + "with " + withs[-1] + body + closeindent * (len(withs) - 1) )
def container_new_folder(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /container-xxxx/newFolder API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder
    """
    # NOTE(review): `input_params={}` is a mutable default argument;
    # harmless only as long as the dict is never mutated here or by
    # DXHTTPRequest -- confirm before relying on it.
    return DXHTTPRequest('/%s/newFolder' % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /container-xxxx/newFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder
Below is the the instruction that describes the task: ### Input: Invokes the /container-xxxx/newFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder ### Response: def container_new_folder(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /container-xxxx/newFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FnewFolder """ return DXHTTPRequest('/%s/newFolder' % object_id, input_params, always_retry=always_retry, **kwargs)
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
    """Produce N identical layers.

    Each entry is an independent deep copy of *module* (no shared
    parameters), collected into a ModuleList.
    """
    copies = [copy.deepcopy(module) for _ in range(num_copies)]
    return torch.nn.ModuleList(copies)
Produce N identical layers.
Below is the the instruction that describes the task: ### Input: Produce N identical layers. ### Response: def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList: """Produce N identical layers.""" return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])
def get_json_encoders_for_type(self, type_to_encode: type) -> Optional[Iterable[JSONEncoder]]:
    """
    Gets the registered JSON encoders for the given type.

    :param type_to_encode: the type of object that is to be encoded
    :return: the encoders for the given type, else `None` if unknown
    """
    # dict.get returns None for unregistered types, matching the
    # documented contract in a single lookup.
    return self._json_encoders.get(type_to_encode)
Gets the registered JSON encoder for the given type. :param type_to_encode: the type of object that is to be encoded :return: the encoder for the given object else `None` if unknown
Below is the the instruction that describes the task: ### Input: Gets the registered JSON encoder for the given type. :param type_to_encode: the type of object that is to be encoded :return: the encoder for the given object else `None` if unknown ### Response: def get_json_encoders_for_type(self, type_to_encode: type) -> Optional[Iterable[JSONEncoder]]: """ Gets the registered JSON encoder for the given type. :param type_to_encode: the type of object that is to be encoded :return: the encoder for the given object else `None` if unknown """ if type_to_encode not in self._json_encoders: return None return self._json_encoders[type_to_encode]
def freeze(self):
    """Create a usable data structure for serializing.

    Extends the base Sphinx search-index data with a flat ``store``
    mapping (sequential int -> per-object entry dict) describing every
    indexed object.
    """
    data = super(IndexBuilder, self).freeze()
    try:
        # Sphinx >= 1.5 format
        # Due to changes from github.com/sphinx-doc/sphinx/pull/2454
        base_file_names = data['docnames']
    except KeyError:
        # Sphinx < 1.5 format
        base_file_names = data['filenames']

    store = {}
    c = itertools.count()
    for prefix, items in iteritems(data['objects']):
        for name, (index, typeindex, _, shortanchor) in iteritems(items):

            objtype = data['objtypes'][typeindex]
            if objtype.startswith('cpp:'):
                # C++ names carry their scope in the name itself; move
                # everything before the last '::' into the prefix.
                split = name.rsplit('::', 1)
                if len(split) != 2:
                    warnings.warn("What's up with %s?" % str((prefix, name, objtype)))
                    continue
                prefix, name = split
                # NOTE(review): this rebinds the loop variable `prefix`,
                # so subsequent non-cpp entries in the same `items`
                # group see the mutated prefix -- confirm intended.
                last_prefix = prefix.split('::')[-1]
            else:
                last_prefix = prefix.split('.')[-1]

            store[next(c)] = {
                'filename': base_file_names[index],
                'objtype': objtype,
                'prefix': prefix,
                'last_prefix': last_prefix,
                'name': name,
                'shortanchor': shortanchor,
            }

    data.update({'store': store})
    return data
Create a usable data structure for serializing.
Below is the the instruction that describes the task: ### Input: Create a usable data structure for serializing. ### Response: def freeze(self): """Create a usable data structure for serializing.""" data = super(IndexBuilder, self).freeze() try: # Sphinx >= 1.5 format # Due to changes from github.com/sphinx-doc/sphinx/pull/2454 base_file_names = data['docnames'] except KeyError: # Sphinx < 1.5 format base_file_names = data['filenames'] store = {} c = itertools.count() for prefix, items in iteritems(data['objects']): for name, (index, typeindex, _, shortanchor) in iteritems(items): objtype = data['objtypes'][typeindex] if objtype.startswith('cpp:'): split = name.rsplit('::', 1) if len(split) != 2: warnings.warn("What's up with %s?" % str((prefix, name, objtype))) continue prefix, name = split last_prefix = prefix.split('::')[-1] else: last_prefix = prefix.split('.')[-1] store[next(c)] = { 'filename': base_file_names[index], 'objtype': objtype, 'prefix': prefix, 'last_prefix': last_prefix, 'name': name, 'shortanchor': shortanchor, } data.update({'store': store}) return data
def get_app_kwarg_dict(appInstance):
    """Returns the dictionary of CORS specific app configurations."""
    # In order to support blueprints which do not have a config attribute
    app_config = getattr(appInstance, 'config', {})
    return {
        option.lower().replace('cors_', ''): app_config.get(option)
        for option in CONFIG_OPTIONS
        if app_config.get(option) is not None
    }
Returns the dictionary of CORS specific app configurations.
Below is the the instruction that describes the task: ### Input: Returns the dictionary of CORS specific app configurations. ### Response: def get_app_kwarg_dict(appInstance): """Returns the dictionary of CORS specific app configurations.""" # In order to support blueprints which do not have a config attribute app_config = getattr(appInstance, 'config', {}) return dict( (k.lower().replace('cors_', ''), app_config.get(k)) for k in CONFIG_OPTIONS if app_config.get(k) is not None )
def list_files(dir_path, recursive=True):
    """
    Return a list of files in dir_path.

    :param dir_path: directory to scan.
    :param recursive: when True (default), include files from all
        subdirectories; when False, only the top-level files.
    :returns: list of full paths -- always a list, even for an empty or
        missing directory.
    """
    # Bug fix: the original returned from inside the os.walk loop, so a
    # directory that yielded no walk entries returned None instead of a
    # list, and recursive mode redundantly re-walked every subtree.
    file_list = []
    for root, _dirs, files in os.walk(dir_path):
        file_list.extend(os.path.join(root, f) for f in files)
        if not recursive:
            # The first yielded triple covers dir_path itself.
            break
    return file_list
Return a list of files in dir_path.
Below is the the instruction that describes the task: ### Input: Return a list of files in dir_path. ### Response: def list_files(dir_path, recursive=True): """ Return a list of files in dir_path. """ for root, dirs, files in os.walk(dir_path): file_list = [os.path.join(root, f) for f in files] if recursive: for dir in dirs: dir = os.path.join(root, dir) file_list.extend(list_files(dir, recursive=True)) return file_list
def get_template(file):
    '''
    Lookup a template class for the given filename or file extension.
    Return None when no implementation is found.
    '''
    # Bug fix: the original used Python-2-only syntax
    # (`except Exception, e:` and `dict.has_key`), which is a
    # SyntaxError / AttributeError on Python 3.
    #
    # Strip leading path components / extension segments until a
    # registered pattern is found (e.g. "dir/page.html.erb" -> "erb").
    pattern = str(file).lower()
    while len(pattern) and not Lean.is_registered(pattern):
        pattern = os.path.basename(pattern)
        pattern = re.sub(r'^[^.]*\.?', '', pattern)

    # Try to find a preferred engine.
    preferred_klass = Lean.preferred_mappings.get(pattern)
    if preferred_klass:
        return preferred_klass

    # Fall back to the general list of mappings
    klasses = Lean.template_mappings[pattern]

    # Try to find an engine which is already loaded
    template = None
    for klass in klasses:
        if hasattr(klass, 'is_engine_initialized') and callable(klass.is_engine_initialized):
            if klass.is_engine_initialized():
                template = klass
                break

    if template:
        return template

    # Try each of the classes until one succeeds. If all of them fails,
    # we'll raise the error of the first class.
    first_failure = None
    for klass in klasses:
        try:
            # NOTE(review): returning a class cannot raise, so this
            # try/except is effectively dead code. The Ruby original
            # (Tilt) *instantiated* the class here -- confirm intent
            # before simplifying.
            return klass
        except Exception as e:
            if first_failure is None:
                first_failure = e
    if first_failure:
        raise Exception(first_failure)
Lookup a template class for the given filename or file extension. Return nil when no implementation is found.
Below is the the instruction that describes the task: ### Input: Lookup a template class for the given filename or file extension. Return nil when no implementation is found. ### Response: def get_template(file): ''' Lookup a template class for the given filename or file extension. Return nil when no implementation is found. ''' pattern = str(file).lower() while len(pattern) and not Lean.is_registered(pattern): pattern = os.path.basename(pattern) pattern = re.sub(r'^[^.]*\.?','',pattern) # Try to find a preferred engine. preferred_klass = Lean.preferred_mappings[pattern] if Lean.preferred_mappings.has_key(pattern) else None if preferred_klass: return preferred_klass # Fall back to the general list of mappings klasses = Lean.template_mappings[pattern] # Try to find an engine which is already loaded template = None for klass in klasses: if hasattr(klass,'is_engine_initialized') and callable(klass.is_engine_initialized): if klass.is_engine_initialized(): template = klass break if template: return template # Try each of the classes until one succeeds. If all of them fails, # we'll raise the error of the first class. first_failure = None for klass in klasses: try: return klass except Exception, e: if not first_failure: first_failure = e if first_failure: raise Exception(first_failure)
def parse(query_string, info={}): """ :returns: a normalized query_dict as in the following examples: >>> parse('kind=stats', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean', 'max'], 'k': [0, 1], 'rlzs': False} >>> parse('kind=rlzs', {'stats': {}, 'num_rlzs': 3}) {'kind': ['rlz-000', 'rlz-001', 'rlz-002'], 'k': [0, 1, 2], 'rlzs': True} >>> parse('kind=mean', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean'], 'k': [0], 'rlzs': False} >>> parse('kind=rlz-3&imt=PGA&site_id=0', {'stats': {}}) {'kind': ['rlz-3'], 'imt': ['PGA'], 'site_id': [0], 'k': [3], 'rlzs': True} """ qdic = parse_qs(query_string) loss_types = info.get('loss_types', []) for key, val in qdic.items(): # for instance, convert site_id to an int if key == 'loss_type': qdic[key] = [loss_types[k] for k in val] else: qdic[key] = [lit_eval(v) for v in val] if info: qdic['k'], qdic['kind'], qdic['rlzs'] = _normalize(qdic['kind'], info) return qdic
:returns: a normalized query_dict as in the following examples: >>> parse('kind=stats', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean', 'max'], 'k': [0, 1], 'rlzs': False} >>> parse('kind=rlzs', {'stats': {}, 'num_rlzs': 3}) {'kind': ['rlz-000', 'rlz-001', 'rlz-002'], 'k': [0, 1, 2], 'rlzs': True} >>> parse('kind=mean', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean'], 'k': [0], 'rlzs': False} >>> parse('kind=rlz-3&imt=PGA&site_id=0', {'stats': {}}) {'kind': ['rlz-3'], 'imt': ['PGA'], 'site_id': [0], 'k': [3], 'rlzs': True}
Below is the the instruction that describes the task: ### Input: :returns: a normalized query_dict as in the following examples: >>> parse('kind=stats', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean', 'max'], 'k': [0, 1], 'rlzs': False} >>> parse('kind=rlzs', {'stats': {}, 'num_rlzs': 3}) {'kind': ['rlz-000', 'rlz-001', 'rlz-002'], 'k': [0, 1, 2], 'rlzs': True} >>> parse('kind=mean', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean'], 'k': [0], 'rlzs': False} >>> parse('kind=rlz-3&imt=PGA&site_id=0', {'stats': {}}) {'kind': ['rlz-3'], 'imt': ['PGA'], 'site_id': [0], 'k': [3], 'rlzs': True} ### Response: def parse(query_string, info={}): """ :returns: a normalized query_dict as in the following examples: >>> parse('kind=stats', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean', 'max'], 'k': [0, 1], 'rlzs': False} >>> parse('kind=rlzs', {'stats': {}, 'num_rlzs': 3}) {'kind': ['rlz-000', 'rlz-001', 'rlz-002'], 'k': [0, 1, 2], 'rlzs': True} >>> parse('kind=mean', {'stats': {'mean': 0, 'max': 1}}) {'kind': ['mean'], 'k': [0], 'rlzs': False} >>> parse('kind=rlz-3&imt=PGA&site_id=0', {'stats': {}}) {'kind': ['rlz-3'], 'imt': ['PGA'], 'site_id': [0], 'k': [3], 'rlzs': True} """ qdic = parse_qs(query_string) loss_types = info.get('loss_types', []) for key, val in qdic.items(): # for instance, convert site_id to an int if key == 'loss_type': qdic[key] = [loss_types[k] for k in val] else: qdic[key] = [lit_eval(v) for v in val] if info: qdic['k'], qdic['kind'], qdic['rlzs'] = _normalize(qdic['kind'], info) return qdic
def visit_starred(self, node, parent): """visit a Starred node and return a new instance of it""" context = self._get_context(node) newnode = nodes.Starred( ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent ) newnode.postinit(self.visit(node.value, newnode)) return newnode
visit a Starred node and return a new instance of it
Below is the the instruction that describes the task: ### Input: visit a Starred node and return a new instance of it ### Response: def visit_starred(self, node, parent): """visit a Starred node and return a new instance of it""" context = self._get_context(node) newnode = nodes.Starred( ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent ) newnode.postinit(self.visit(node.value, newnode)) return newnode
def run_step(*args, prompt=None): """ Prints out the command and asks if it should be run. If yes (default), runs it. :param args: list of strings (command and args) """ global DRY_RUN cmd = args print(' '.join(cmd)) if skip_step(): print('--- Skipping...') elif DRY_RUN: print('--- Pretending to run...') else: if prompt: print(prompt) subprocess.check_output(cmd)
Prints out the command and asks if it should be run. If yes (default), runs it. :param args: list of strings (command and args)
Below is the the instruction that describes the task: ### Input: Prints out the command and asks if it should be run. If yes (default), runs it. :param args: list of strings (command and args) ### Response: def run_step(*args, prompt=None): """ Prints out the command and asks if it should be run. If yes (default), runs it. :param args: list of strings (command and args) """ global DRY_RUN cmd = args print(' '.join(cmd)) if skip_step(): print('--- Skipping...') elif DRY_RUN: print('--- Pretending to run...') else: if prompt: print(prompt) subprocess.check_output(cmd)
def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value): """GetFeatureStateForScope. [Preview API] Get the state of the specified feature for the given named scope :param str feature_id: Contribution id of the feature :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>` """ route_values = {} if feature_id is not None: route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str') if user_scope is not None: route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') if scope_name is not None: route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str') if scope_value is not None: route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str') response = self._send(http_method='GET', location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4', version='5.0-preview.1', route_values=route_values) return self._deserialize('ContributedFeatureState', response)
GetFeatureStateForScope. [Preview API] Get the state of the specified feature for the given named scope :param str feature_id: Contribution id of the feature :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
Below is the the instruction that describes the task: ### Input: GetFeatureStateForScope. [Preview API] Get the state of the specified feature for the given named scope :param str feature_id: Contribution id of the feature :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>` ### Response: def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value): """GetFeatureStateForScope. [Preview API] Get the state of the specified feature for the given named scope :param str feature_id: Contribution id of the feature :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users. :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team") :param str scope_value: Value of the scope (e.g. the project or team id) :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>` """ route_values = {} if feature_id is not None: route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str') if user_scope is not None: route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str') if scope_name is not None: route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str') if scope_value is not None: route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str') response = self._send(http_method='GET', location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4', version='5.0-preview.1', route_values=route_values) return self._deserialize('ContributedFeatureState', response)
def _daily_suns(self, datetimes): """Get sun curve for multiple days of the year.""" for dt in datetimes: # calculate sunrise sunset and noon nss = self.calculate_sunrise_sunset(dt.month, dt.day) dts = tuple(nss[k] for k in ('sunrise', 'noon', 'sunset')) if dts[0] is None: # circle yield (self.calculate_sun(dt.month, dt.day, h) for h in (0, 12, 15)), \ False else: # Arc yield (self.calculate_sun_from_date_time(dt) for dt in dts), True
Get sun curve for multiple days of the year.
Below is the the instruction that describes the task: ### Input: Get sun curve for multiple days of the year. ### Response: def _daily_suns(self, datetimes): """Get sun curve for multiple days of the year.""" for dt in datetimes: # calculate sunrise sunset and noon nss = self.calculate_sunrise_sunset(dt.month, dt.day) dts = tuple(nss[k] for k in ('sunrise', 'noon', 'sunset')) if dts[0] is None: # circle yield (self.calculate_sun(dt.month, dt.day, h) for h in (0, 12, 15)), \ False else: # Arc yield (self.calculate_sun_from_date_time(dt) for dt in dts), True
def get_language_progress(self, lang): """Get informations about user's progression in a language.""" if not self._is_current_language(lang): self._switch_language(lang) fields = ['streak', 'language_string', 'level_progress', 'num_skills_learned', 'level_percent', 'level_points', 'points_rank', 'next_level', 'level_left', 'language', 'points', 'fluency_score', 'level'] return self._make_dict(fields, self.user_data.language_data[lang])
Get informations about user's progression in a language.
Below is the the instruction that describes the task: ### Input: Get informations about user's progression in a language. ### Response: def get_language_progress(self, lang): """Get informations about user's progression in a language.""" if not self._is_current_language(lang): self._switch_language(lang) fields = ['streak', 'language_string', 'level_progress', 'num_skills_learned', 'level_percent', 'level_points', 'points_rank', 'next_level', 'level_left', 'language', 'points', 'fluency_score', 'level'] return self._make_dict(fields, self.user_data.language_data[lang])
def clear_group(self): """Clears the group designation. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.clear_group_template if (self.get_group_metadata().is_read_only() or self.get_group_metadata().is_required()): raise errors.NoAccess() self._my_map['group'] = self._group_default
Clears the group designation. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Clears the group designation. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* ### Response: def clear_group(self): """Clears the group designation. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.clear_group_template if (self.get_group_metadata().is_read_only() or self.get_group_metadata().is_required()): raise errors.NoAccess() self._my_map['group'] = self._group_default
def download_encrypted_file(work_dir, url, key_path, name): """ Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded """ file_path = os.path.join(work_dir, name) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path)
Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded
Below is the the instruction that describes the task: ### Input: Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded ### Response: def download_encrypted_file(work_dir, url, key_path, name): """ Downloads encrypted file from S3 Input1: Working directory Input2: S3 URL to be downloaded Input3: Path to key necessary for decryption Input4: name of file to be downloaded """ file_path = os.path.join(work_dir, name) key = generate_unique_key(key_path, url) encoded_key = base64.b64encode(key) encoded_key_md5 = base64.b64encode(hashlib.md5(key).digest()) h1 = 'x-amz-server-side-encryption-customer-algorithm:AES256' h2 = 'x-amz-server-side-encryption-customer-key:{}'.format(encoded_key) h3 = 'x-amz-server-side-encryption-customer-key-md5:{}'.format(encoded_key_md5) try: subprocess.check_call(['curl', '-fs', '--retry', '5', '-H', h1, '-H', h2, '-H', h3, url, '-o', file_path]) except OSError: raise RuntimeError('Failed to find "curl". Install via "apt-get install curl"') assert os.path.exists(file_path)
def kegg_pathway(df, pathway, a, b=None, ids_from="Proteins", cmap=cm.PuOr_r, is_log2=False, fillna=None, z_score=1): """ Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return: """ df = df.copy() if np.any(df.values < 0) and not is_log2: warnings.warn("Input data has negative values. If data is log2 transformed, set is_log2=True.") if fillna is not None: df = df.fillna(fillna) if z_score is None: pass elif z_score == 0: df = (df - df.median(axis=0)) / df.std(axis=0) elif z_score == 1: df = ((df.T - df.median(axis=1).T) / df.std(axis=1).T).T if b is not None: # Calculate ratio between two groups g1, g2 = df[a].values, df[b].values if is_log2: dr = np.nanmean(g2, axis=1) - np.nanmean(g1, axis=1) else: dr = np.log2(np.nanmean(g2, axis=1) / np.nanmean(g1, axis=1)) else: g1 = df[a].values dr = np.nanmean(g1, axis=1) maxi = np.max(abs(dr)) norm = mpl.colors.Normalize(vmin=-maxi, vmax=maxi) mapper = cm.ScalarMappable(norm=norm, cmap=cm.PuOr_r) # Orange up node_colors = {} for p, v in zip(df.index.get_level_values(ids_from), dr): pid = str(p).split(";")[-1] if "_" in pid: pid = pid[:pid.index("_")] node_colors[pid] = mpl.colors.rgb2hex(mapper.to_rgba(v)) global uniprot_kegg_cache # Only do this once upids = list( node_colors.keys() ) upids = [p for p in upids if p not in uniprot_kegg_cache.keys()] if upids: new_pairs = get_uniprot_id_mapping_pairs('ACC+ID', 'KEGG_ID', upids) uniprot_kegg_cache.update(new_pairs) for p in upids: if p not in uniprot_kegg_cache: uniprot_kegg_cache[p] = None # Not found, don't look again with StringIO() as f: f.write('#hsa\tData\n') for k, c in list(node_colors.items()): if k in uniprot_kegg_cache and uniprot_kegg_cache[k] is not None: kids = uniprot_kegg_cache[k] for kegg_id in kids: f.write('%s\t%s\n' % (kegg_id.split(':')[-1], c )) # Reset file f.seek(0) url = 'https://www.kegg.jp/kegg-bin/mcolor_pathway' m = MultipartEncoder( 
fields={ 'map': pathway, 'mapping_list': ('filename', f), 'mode': 'color', 'submit': 'Exec', 'reference': 'white', } ) r = requests.post(url, data=m, headers={'Content-Type': m.content_type}) if r.status_code == 200: # src="/tmp/mark_pathway154353327948969/hsa04010.1.png" ms = re.finditer('src="(/tmp/mark_pathway[^"]*.png)"', r.text) m = list(ms).pop() # Download image data image = Image.open(requests.get('http://www.kegg.jp%s' % m.group(1), stream=True).raw) width, height = image.size # Get dimensions image = image.crop((1, 1, width-1, height-1)) # Crop black outline print("Scale range: %.2f .. %.2f" % (-maxi, maxi)) return image
Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return:
Below is the the instruction that describes the task: ### Input: Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return: ### Response: def kegg_pathway(df, pathway, a, b=None, ids_from="Proteins", cmap=cm.PuOr_r, is_log2=False, fillna=None, z_score=1): """ Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return: """ df = df.copy() if np.any(df.values < 0) and not is_log2: warnings.warn("Input data has negative values. If data is log2 transformed, set is_log2=True.") if fillna is not None: df = df.fillna(fillna) if z_score is None: pass elif z_score == 0: df = (df - df.median(axis=0)) / df.std(axis=0) elif z_score == 1: df = ((df.T - df.median(axis=1).T) / df.std(axis=1).T).T if b is not None: # Calculate ratio between two groups g1, g2 = df[a].values, df[b].values if is_log2: dr = np.nanmean(g2, axis=1) - np.nanmean(g1, axis=1) else: dr = np.log2(np.nanmean(g2, axis=1) / np.nanmean(g1, axis=1)) else: g1 = df[a].values dr = np.nanmean(g1, axis=1) maxi = np.max(abs(dr)) norm = mpl.colors.Normalize(vmin=-maxi, vmax=maxi) mapper = cm.ScalarMappable(norm=norm, cmap=cm.PuOr_r) # Orange up node_colors = {} for p, v in zip(df.index.get_level_values(ids_from), dr): pid = str(p).split(";")[-1] if "_" in pid: pid = pid[:pid.index("_")] node_colors[pid] = mpl.colors.rgb2hex(mapper.to_rgba(v)) global uniprot_kegg_cache # Only do this once upids = list( node_colors.keys() ) upids = [p for p in upids if p not in uniprot_kegg_cache.keys()] if upids: new_pairs = get_uniprot_id_mapping_pairs('ACC+ID', 'KEGG_ID', upids) uniprot_kegg_cache.update(new_pairs) for p in upids: if p not in uniprot_kegg_cache: uniprot_kegg_cache[p] = None # Not found, don't look again with StringIO() as f: f.write('#hsa\tData\n') for k, c in list(node_colors.items()): if k in 
uniprot_kegg_cache and uniprot_kegg_cache[k] is not None: kids = uniprot_kegg_cache[k] for kegg_id in kids: f.write('%s\t%s\n' % (kegg_id.split(':')[-1], c )) # Reset file f.seek(0) url = 'https://www.kegg.jp/kegg-bin/mcolor_pathway' m = MultipartEncoder( fields={ 'map': pathway, 'mapping_list': ('filename', f), 'mode': 'color', 'submit': 'Exec', 'reference': 'white', } ) r = requests.post(url, data=m, headers={'Content-Type': m.content_type}) if r.status_code == 200: # src="/tmp/mark_pathway154353327948969/hsa04010.1.png" ms = re.finditer('src="(/tmp/mark_pathway[^"]*.png)"', r.text) m = list(ms).pop() # Download image data image = Image.open(requests.get('http://www.kegg.jp%s' % m.group(1), stream=True).raw) width, height = image.size # Get dimensions image = image.crop((1, 1, width-1, height-1)) # Crop black outline print("Scale range: %.2f .. %.2f" % (-maxi, maxi)) return image
def plot_projected_dos(self, pdos_indices=None, legend=None): """Plot projected DOS Parameters ---------- pdos_indices : list of list, optional Sets of indices of atoms whose projected DOS are summed over. The indices start with 0. An example is as follwos: pdos_indices=[[0, 1], [2, 3, 4, 5]] Default is None, which means pdos_indices=[[i] for i in range(natom)] legend : list of instances such as str or int, optional The str(instance) are shown in legend. It has to be len(pdos_indices)==len(legend). Default is None. When None, legend is not shown. """ import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.xaxis.set_tick_params(which='both', direction='in') ax.yaxis.set_tick_params(which='both', direction='in') self._pdos.plot(ax, indices=pdos_indices, legend=legend, draw_grid=False) ax.set_ylim((0, None)) return plt
Plot projected DOS Parameters ---------- pdos_indices : list of list, optional Sets of indices of atoms whose projected DOS are summed over. The indices start with 0. An example is as follwos: pdos_indices=[[0, 1], [2, 3, 4, 5]] Default is None, which means pdos_indices=[[i] for i in range(natom)] legend : list of instances such as str or int, optional The str(instance) are shown in legend. It has to be len(pdos_indices)==len(legend). Default is None. When None, legend is not shown.
Below is the the instruction that describes the task: ### Input: Plot projected DOS Parameters ---------- pdos_indices : list of list, optional Sets of indices of atoms whose projected DOS are summed over. The indices start with 0. An example is as follwos: pdos_indices=[[0, 1], [2, 3, 4, 5]] Default is None, which means pdos_indices=[[i] for i in range(natom)] legend : list of instances such as str or int, optional The str(instance) are shown in legend. It has to be len(pdos_indices)==len(legend). Default is None. When None, legend is not shown. ### Response: def plot_projected_dos(self, pdos_indices=None, legend=None): """Plot projected DOS Parameters ---------- pdos_indices : list of list, optional Sets of indices of atoms whose projected DOS are summed over. The indices start with 0. An example is as follwos: pdos_indices=[[0, 1], [2, 3, 4, 5]] Default is None, which means pdos_indices=[[i] for i in range(natom)] legend : list of instances such as str or int, optional The str(instance) are shown in legend. It has to be len(pdos_indices)==len(legend). Default is None. When None, legend is not shown. """ import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.xaxis.set_tick_params(which='both', direction='in') ax.yaxis.set_tick_params(which='both', direction='in') self._pdos.plot(ax, indices=pdos_indices, legend=legend, draw_grid=False) ax.set_ylim((0, None)) return plt
def setup_cmd_parser(cls): """Returns the Groupsio argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Optional arguments group = parser.parser.add_argument_group('Groupsio arguments') group.add_argument('--mboxes-path', dest='mboxes_path', help="Path where mbox files will be stored") group.add_argument('--no-verify', dest='verify', action='store_false', help="Value 'True' enable SSL verification") # Required arguments parser.parser.add_argument('group_name', help="Name of the group on Groups.io") return parser
Returns the Groupsio argument parser.
Below is the the instruction that describes the task: ### Input: Returns the Groupsio argument parser. ### Response: def setup_cmd_parser(cls): """Returns the Groupsio argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Optional arguments group = parser.parser.add_argument_group('Groupsio arguments') group.add_argument('--mboxes-path', dest='mboxes_path', help="Path where mbox files will be stored") group.add_argument('--no-verify', dest='verify', action='store_false', help="Value 'True' enable SSL verification") # Required arguments parser.parser.add_argument('group_name', help="Name of the group on Groups.io") return parser
def find_stars(self, data, mask=None): """ Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found. 
""" star_cutouts = _find_stars(data, self.kernel, self.threshold_eff, mask=mask, exclude_border=self.exclude_border) if star_cutouts is None: warnings.warn('No sources were found.', NoDetectionsWarning) return None self._star_cutouts = star_cutouts star_props = [] for star_cutout in star_cutouts: props = _DAOFind_Properties(star_cutout, self.kernel, self.sky) if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any(): continue if (props.sharpness <= self.sharplo or props.sharpness >= self.sharphi): continue if (props.roundness1 <= self.roundlo or props.roundness1 >= self.roundhi): continue if (props.roundness2 <= self.roundlo or props.roundness2 >= self.roundhi): continue if self.peakmax is not None and props.peak >= self.peakmax: continue star_props.append(props) nstars = len(star_props) if nstars == 0: warnings.warn('Sources were found, but none pass the sharpness ' 'and roundness criteria.', NoDetectionsWarning) return None if self.brightest is not None: fluxes = [props.flux for props in star_props] idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist()) star_props = [star_props[k] for k in idx] nstars = len(star_props) table = Table() table['id'] = np.arange(nstars) + 1 columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1', 'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag') for column in columns: table[column] = [getattr(props, column) for props in star_props] return table
Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found.
Below is the the instruction that describes the task: ### Input: Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. * ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found. ### Response: def find_stars(self, data, mask=None): """ Find stars in an astronomical image. Parameters ---------- data : 2D array_like The 2D image array. mask : 2D bool array, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are ignored when searching for stars. Returns ------- table : `~astropy.table.Table` or `None` A table of found stars with the following parameters: * ``id``: unique object identification number. * ``xcentroid, ycentroid``: object centroid. * ``sharpness``: object sharpness. * ``roundness1``: object roundness based on symmetry. 
* ``roundness2``: object roundness based on marginal Gaussian fits. * ``npix``: the total number of pixels in the Gaussian kernel array. * ``sky``: the input ``sky`` parameter. * ``peak``: the peak, sky-subtracted, pixel value of the object. * ``flux``: the object flux calculated as the peak density in the convolved image divided by the detection threshold. This derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. * ``mag``: the object instrumental magnitude calculated as ``-2.5 * log10(flux)``. The derivation matches that of `DAOFIND`_ if ``sky`` is 0.0. `None` is returned if no stars are found. """ star_cutouts = _find_stars(data, self.kernel, self.threshold_eff, mask=mask, exclude_border=self.exclude_border) if star_cutouts is None: warnings.warn('No sources were found.', NoDetectionsWarning) return None self._star_cutouts = star_cutouts star_props = [] for star_cutout in star_cutouts: props = _DAOFind_Properties(star_cutout, self.kernel, self.sky) if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any(): continue if (props.sharpness <= self.sharplo or props.sharpness >= self.sharphi): continue if (props.roundness1 <= self.roundlo or props.roundness1 >= self.roundhi): continue if (props.roundness2 <= self.roundlo or props.roundness2 >= self.roundhi): continue if self.peakmax is not None and props.peak >= self.peakmax: continue star_props.append(props) nstars = len(star_props) if nstars == 0: warnings.warn('Sources were found, but none pass the sharpness ' 'and roundness criteria.', NoDetectionsWarning) return None if self.brightest is not None: fluxes = [props.flux for props in star_props] idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist()) star_props = [star_props[k] for k in idx] nstars = len(star_props) table = Table() table['id'] = np.arange(nstars) + 1 columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1', 'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag') for column in columns: table[column] = [getattr(props, column) for props in 
star_props] return table
def has_context_loop(state, incorrect_msg, exact_names): """When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience. """ return _test( state, incorrect_msg or MSG_INCORRECT_LOOP, exact_names, tv_name="_target_vars", highlight_name="target", )
When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience.
Below is the the instruction that describes the task: ### Input: When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience. ### Response: def has_context_loop(state, incorrect_msg, exact_names): """When dispatched on loops, has_context the target vars are the attribute _target_vars. Note: This is to allow people to call has_context on a node (e.g. for_loop) rather than one of its attributes (e.g. body). Purely for convenience. """ return _test( state, incorrect_msg or MSG_INCORRECT_LOOP, exact_names, tv_name="_target_vars", highlight_name="target", )