Dataset column summary (string and list lengths are min to max; "1 value" marks a constant column):

    repo              string   7 to 55 chars
    path              string   4 to 223 chars
    func_name         string   1 to 134 chars
    original_string   string   75 to 104k chars
    language          string   1 value
    code              string   75 to 104k chars
    code_tokens       list     19 to 28.4k items
    docstring         string   1 to 46.9k chars
    docstring_tokens  list     1 to 1.97k items
    sha               string   40 chars
    url               string   87 to 315 chars
    partition         string   1 value
dmlc/gluon-nlp
scripts/bert/utils.py
load_vocab
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    index = 0
    with io.open(vocab_file, 'r') as reader:
        while True:
            token = reader.readline()
            if not token:
                break
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab
python
[ "def", "load_vocab", "(", "vocab_file", ")", ":", "vocab", "=", "collections", ".", "OrderedDict", "(", ")", "index", "=", "0", "with", "io", ".", "open", "(", "vocab_file", ",", "'r'", ")", "as", "reader", ":", "while", "True", ":", "token", "=", "r...
Loads a vocabulary file into a dictionary.
[ "Loads", "a", "vocabulary", "file", "into", "a", "dictionary", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/utils.py#L125-L137
train
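A minimal usage sketch for the load_vocab row above (the file name and tokens below are made up; the loop is a compact equivalent of the readline-based original):

    import collections
    import io

    def load_vocab(vocab_file):
        # One token per line; the line number becomes the token index.
        vocab = collections.OrderedDict()
        with io.open(vocab_file, 'r') as reader:
            for index, line in enumerate(reader):
                vocab[line.strip()] = index
        return vocab

    with io.open('toy_vocab.txt', 'w') as writer:
        writer.write('[PAD]\n[UNK]\nhello\n')
    print(load_vocab('toy_vocab.txt'))
    # OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('hello', 2)])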
dmlc/gluon-nlp
src/gluonnlp/model/convolutional_encoder.py
ConvolutionalEncoder.hybrid_forward
def hybrid_forward(self, F, inputs, mask=None):  # pylint: disable=arguments-differ
    r"""
    Forward computation for char_encoder

    Parameters
    ----------
    inputs: NDArray
        The input tensor is of shape `(seq_len, batch_size, embedding_size)` TNC.
    mask: NDArray
        The mask applied to the input of shape `(seq_len, batch_size)`, the mask will
        be broadcasted along the embedding dimension.

    Returns
    -------
    output: NDArray
        The output of the encoder with shape `(batch_size, output_size)`
    """
    if mask is not None:
        inputs = F.broadcast_mul(inputs, mask.expand_dims(-1))
    inputs = F.transpose(inputs, axes=(1, 2, 0))
    output = self._convs(inputs)
    if self._highways:
        output = self._highways(output)
    if self._projection:
        output = self._projection(output)
    return output
python
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "inputs", ",", "mask", "=", "None", ")", ":", "# pylint: disable=arguments-differ", "if", "mask", "is", "not", "None", ":", "inputs", "=", "F", ".", "broadcast_mul", "(", "inputs", ",", "mask", ".", "e...
r""" Forward computation for char_encoder Parameters ---------- inputs: NDArray The input tensor is of shape `(seq_len, batch_size, embedding_size)` TNC. mask: NDArray The mask applied to the input of shape `(seq_len, batch_size)`, the mask will be broadcasted along the embedding dimension. Returns ---------- output: NDArray The output of the encoder with shape `(batch_size, output_size)`
[ "r", "Forward", "computation", "for", "char_encoder" ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/convolutional_encoder.py#L135-L166
train
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
_position_encoding_init
def _position_encoding_init(max_length, dim):
    """Init the sinusoid position encoding table """
    position_enc = np.arange(max_length).reshape((-1, 1)) \
        / (np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1))))
    # Apply the sine to even columns and the cosine to odds.
    position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # dim 2i
    position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # dim 2i+1
    return position_enc
python
[ "def", "_position_encoding_init", "(", "max_length", ",", "dim", ")", ":", "position_enc", "=", "np", ".", "arange", "(", "max_length", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "/", "(", "np", ".", "power", "(", "10000", ",", "(...
Init the sinusoid position encoding table
[ "Init", "the", "sinusoid", "position", "encoding", "table" ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L46-L53
train
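The sinusoid table above depends only on NumPy, so it can be checked standalone; this sketch reproduces the row's logic and verifies that position 0 yields sin(0) = 0 on even columns and cos(0) = 1 on odd columns:

    import numpy as np

    def position_encoding_init(max_length, dim):
        # Divide each position by 10000**(2*i/dim) for column i, then apply
        # sine to even columns and cosine to odd columns.
        position_enc = np.arange(max_length).reshape((-1, 1)) \
            / np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1)))
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
        return position_enc

    table = position_encoding_init(4, 6)
    print(table.shape)  # (4, 6)
    print(table[0])     # [0. 1. 0. 1. 0. 1.]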
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
get_transformer_encoder_decoder
def get_transformer_encoder_decoder(num_layers=2,
                                    num_heads=8, scaled=True,
                                    units=512, hidden_size=2048, dropout=0.0,
                                    use_residual=True,
                                    max_src_length=50, max_tgt_length=50,
                                    weight_initializer=None, bias_initializer='zeros',
                                    prefix='transformer_', params=None):
    """Build a pair of Parallel Transformer encoder/decoder

    Parameters
    ----------
    num_layers : int
    num_heads : int
    scaled : bool
    units : int
    hidden_size : int
    dropout : float
    use_residual : bool
    max_src_length : int
    max_tgt_length : int
    weight_initializer : mx.init.Initializer or None
    bias_initializer : mx.init.Initializer or None
    prefix : str, default 'transformer_'
        Prefix for name of `Block`s.
    params : Parameter or None
        Container for weight sharing between layers. Created if `None`.

    Returns
    -------
    encoder : TransformerEncoder
    decoder : TransformerDecoder
    """
    encoder = TransformerEncoder(num_layers=num_layers,
                                 num_heads=num_heads,
                                 max_length=max_src_length,
                                 units=units,
                                 hidden_size=hidden_size,
                                 dropout=dropout,
                                 scaled=scaled,
                                 use_residual=use_residual,
                                 weight_initializer=weight_initializer,
                                 bias_initializer=bias_initializer,
                                 prefix=prefix + 'enc_', params=params)
    decoder = TransformerDecoder(num_layers=num_layers,
                                 num_heads=num_heads,
                                 max_length=max_tgt_length,
                                 units=units,
                                 hidden_size=hidden_size,
                                 dropout=dropout,
                                 scaled=scaled,
                                 use_residual=use_residual,
                                 weight_initializer=weight_initializer,
                                 bias_initializer=bias_initializer,
                                 prefix=prefix + 'dec_', params=params)
    return encoder, decoder
python
[ "def", "get_transformer_encoder_decoder", "(", "num_layers", "=", "2", ",", "num_heads", "=", "8", ",", "scaled", "=", "True", ",", "units", "=", "512", ",", "hidden_size", "=", "2048", ",", "dropout", "=", "0.0", ",", "use_residual", "=", "True", ",", "...
Build a pair of Parallel Transformer encoder/decoder

Parameters
----------
num_layers : int
num_heads : int
scaled : bool
units : int
hidden_size : int
dropout : float
use_residual : bool
max_src_length : int
max_tgt_length : int
weight_initializer : mx.init.Initializer or None
bias_initializer : mx.init.Initializer or None
prefix : str, default 'transformer_'
    Prefix for name of `Block`s.
params : Parameter or None
    Container for weight sharing between layers. Created if `None`.

Returns
-------
encoder : TransformerEncoder
decoder : TransformerDecoder
[ "Build", "a", "pair", "of", "Parallel", "Transformer", "encoder", "/", "decoder" ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L1123-L1177
train
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
transformer_en_de_512
def transformer_en_de_512(dataset_name=None, src_vocab=None, tgt_vocab=None, pretrained=False,
                          ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs):
    r"""Transformer pretrained model.

    Embedding size is 512, and hidden layer size is 2048.

    Parameters
    ----------
    dataset_name : str or None, default None
    src_vocab : gluonnlp.Vocab or None, default None
    tgt_vocab : gluonnlp.Vocab or None, default None
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '$MXNET_HOME/models'
        Location for keeping the model parameters.
        MXNET_HOME defaults to '~/.mxnet'.

    Returns
    -------
    gluon.Block, gluonnlp.Vocab, gluonnlp.Vocab
    """
    predefined_args = {'num_units': 512,
                       'hidden_size': 2048,
                       'dropout': 0.1,
                       'epsilon': 0.1,
                       'num_layers': 6,
                       'num_heads': 8,
                       'scaled': True,
                       'share_embed': True,
                       'embed_size': 512,
                       'tie_weights': True,
                       'embed_initializer': None}
    mutable_args = frozenset(['num_units', 'hidden_size', 'dropout', 'epsilon',
                              'num_layers', 'num_heads', 'scaled'])
    assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
        'Cannot override predefined model settings.'
    predefined_args.update(kwargs)
    encoder, decoder = get_transformer_encoder_decoder(
        units=predefined_args['num_units'],
        hidden_size=predefined_args['hidden_size'],
        dropout=predefined_args['dropout'],
        num_layers=predefined_args['num_layers'],
        num_heads=predefined_args['num_heads'],
        max_src_length=530,
        max_tgt_length=549,
        scaled=predefined_args['scaled'])
    return _get_transformer_model(NMTModel, 'transformer_en_de_512', dataset_name,
                                  src_vocab, tgt_vocab, encoder, decoder,
                                  predefined_args['share_embed'],
                                  predefined_args['embed_size'],
                                  predefined_args['tie_weights'],
                                  predefined_args['embed_initializer'],
                                  pretrained, ctx, root)
python
[ "def", "transformer_en_de_512", "(", "dataset_name", "=", "None", ",", "src_vocab", "=", "None", ",", "tgt_vocab", "=", "None", ",", "pretrained", "=", "False", ",", "ctx", "=", "cpu", "(", ")", ",", "root", "=", "os", ".", "path", ".", "join", "(", ...
r"""Transformer pretrained model. Embedding size is 400, and hidden layer size is 1150. Parameters ---------- dataset_name : str or None, default None src_vocab : gluonnlp.Vocab or None, default None tgt_vocab : gluonnlp.Vocab or None, default None pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab, gluonnlp.Vocab
[ "r", "Transformer", "pretrained", "model", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L1200-L1251
train
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
BasePositionwiseFFN._get_activation
def _get_activation(self, act):
    """Get activation block based on the name. """
    if isinstance(act, str):
        if act.lower() == 'gelu':
            return GELU()
        else:
            return gluon.nn.Activation(act)
    assert isinstance(act, gluon.Block)
    return act
python
[ "def", "_get_activation", "(", "self", ",", "act", ")", ":", "if", "isinstance", "(", "act", ",", "str", ")", ":", "if", "act", ".", "lower", "(", ")", "==", "'gelu'", ":", "return", "GELU", "(", ")", "else", ":", "return", "gluon", ".", "nn", "....
Get activation block based on the name.
[ "Get", "activation", "block", "based", "on", "the", "name", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L116-L124
train
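For context on the 'gelu' branch above: one common definition of GELU is the exact erf form below. Whether GluonNLP's GELU block uses this exact form or the tanh approximation is not shown in this row, so treat this as an illustrative sketch:

    import math

    def gelu(x):
        # Exact GELU: x * Phi(x), where Phi is the standard normal CDF.
        return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

    print(round(gelu(1.0), 4))  # 0.8413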
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
BasePositionwiseFFN.hybrid_forward
def hybrid_forward(self, F, inputs):  # pylint: disable=arguments-differ
    # pylint: disable=unused-argument
    """Position-wise encoding of the inputs.

    Parameters
    ----------
    inputs : Symbol or NDArray
        Input sequence. Shape (batch_size, length, C_in)

    Returns
    -------
    outputs : Symbol or NDArray
        Shape (batch_size, length, C_out)
    """
    outputs = self.ffn_1(inputs)
    if self.activation:
        outputs = self.activation(outputs)
    outputs = self.ffn_2(outputs)
    if self._dropout:
        outputs = self.dropout_layer(outputs)
    if self._use_residual:
        outputs = outputs + inputs
    outputs = self.layer_norm(outputs)
    return outputs
python
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "inputs", ")", ":", "# pylint: disable=arguments-differ", "# pylint: disable=unused-argument", "outputs", "=", "self", ".", "ffn_1", "(", "inputs", ")", "if", "self", ".", "activation", ":", "outputs", "=", "s...
Position-wise encoding of the inputs.

Parameters
----------
inputs : Symbol or NDArray
    Input sequence. Shape (batch_size, length, C_in)

Returns
-------
outputs : Symbol or NDArray
    Shape (batch_size, length, C_out)
[ "Position", "-", "wise", "encoding", "of", "the", "inputs", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L126-L149
train
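The cell above is the standard position-wise feed-forward block: two dense layers applied independently at each position, plus an optional residual connection and a layer norm. A NumPy sketch of the same dataflow, with made-up shapes and ReLU standing in for the configurable activation:

    import numpy as np

    batch, length, c_in, hidden = 2, 5, 8, 16
    x = np.random.randn(batch, length, c_in)
    w1 = np.random.randn(c_in, hidden)
    w2 = np.random.randn(hidden, c_in)

    h = np.maximum(x @ w1, 0)  # ffn_1 followed by the activation
    y = h @ w2 + x             # ffn_2 plus the residual connection
    y = (y - y.mean(-1, keepdims=True)) / (y.std(-1, keepdims=True) + 1e-5)  # layer norm
    print(y.shape)  # (2, 5, 8)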
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
BaseTransformerEncoderCell.hybrid_forward
def hybrid_forward(self, F, inputs, mask=None):  # pylint: disable=arguments-differ
    # pylint: disable=unused-argument
    """Transformer Encoder Attention Cell.

    Parameters
    ----------
    inputs : Symbol or NDArray
        Input sequence. Shape (batch_size, length, C_in)
    mask : Symbol or NDArray or None
        Mask for inputs. Shape (batch_size, length, length)

    Returns
    -------
    encoder_cell_outputs: list
        Outputs of the encoder cell. Contains:

        - outputs of the transformer encoder cell. Shape (batch_size, length, C_out)
        - additional_outputs of all the transformer encoder cell
    """
    outputs, attention_weights = \
        self.attention_cell(inputs, inputs, inputs, mask)
    outputs = self.proj(outputs)
    if self._dropout:
        outputs = self.dropout_layer(outputs)
    if self._use_residual:
        outputs = outputs + inputs
    outputs = self.layer_norm(outputs)
    outputs = self.ffn(outputs)
    additional_outputs = []
    if self._output_attention:
        additional_outputs.append(attention_weights)
    return outputs, additional_outputs
python
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "inputs", ",", "mask", "=", "None", ")", ":", "# pylint: disable=arguments-differ", "# pylint: disable=unused-argument", "outputs", ",", "attention_weights", "=", "self", ".", "attention_cell", "(", "inputs", ",",...
Transformer Encoder Attention Cell.

Parameters
----------
inputs : Symbol or NDArray
    Input sequence. Shape (batch_size, length, C_in)
mask : Symbol or NDArray or None
    Mask for inputs. Shape (batch_size, length, length)

Returns
-------
encoder_cell_outputs: list
    Outputs of the encoder cell. Contains:

    - outputs of the transformer encoder cell. Shape (batch_size, length, C_out)
    - additional_outputs of all the transformer encoder cell
[ "Transformer", "Encoder", "Attention", "Cell", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L236-L267
train
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
TransformerDecoderCell.hybrid_forward
def hybrid_forward(self, F, inputs, mem_value, mask=None, mem_mask=None):
    # pylint: disable=unused-argument
    # pylint: disable=arguments-differ
    """Transformer Decoder Attention Cell.

    Parameters
    ----------
    inputs : Symbol or NDArray
        Input sequence. Shape (batch_size, length, C_in)
    mem_value : Symbol or NDArrays
        Memory value, i.e. output of the encoder. Shape (batch_size, mem_length, C_in)
    mask : Symbol or NDArray or None
        Mask for inputs. Shape (batch_size, length, length)
    mem_mask : Symbol or NDArray or None
        Mask for mem_value. Shape (batch_size, length, mem_length)

    Returns
    -------
    decoder_cell_outputs: list
        Outputs of the decoder cell. Contains:

        - outputs of the transformer decoder cell. Shape (batch_size, length, C_out)
        - additional_outputs of all the transformer decoder cell
    """
    outputs, attention_in_outputs = \
        self.attention_cell_in(inputs, inputs, inputs, mask)
    outputs = self.proj_in(outputs)
    if self._dropout:
        outputs = self.dropout_layer(outputs)
    if self._use_residual:
        outputs = outputs + inputs
    outputs = self.layer_norm_in(outputs)
    inputs = outputs
    outputs, attention_inter_outputs = \
        self.attention_cell_inter(inputs, mem_value, mem_value, mem_mask)
    outputs = self.proj_inter(outputs)
    if self._dropout:
        outputs = self.dropout_layer(outputs)
    if self._use_residual:
        outputs = outputs + inputs
    outputs = self.layer_norm_inter(outputs)
    outputs = self.ffn(outputs)
    additional_outputs = []
    if self._output_attention:
        additional_outputs.append(attention_in_outputs)
        additional_outputs.append(attention_inter_outputs)
    return outputs, additional_outputs
python
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "inputs", ",", "mem_value", ",", "mask", "=", "None", ",", "mem_mask", "=", "None", ")", ":", "#pylint: disable=unused-argument", "# pylint: disable=arguments-differ", "outputs", ",", "attention_in_outputs", "=",...
Transformer Decoder Attention Cell.

Parameters
----------
inputs : Symbol or NDArray
    Input sequence. Shape (batch_size, length, C_in)
mem_value : Symbol or NDArrays
    Memory value, i.e. output of the encoder. Shape (batch_size, mem_length, C_in)
mask : Symbol or NDArray or None
    Mask for inputs. Shape (batch_size, length, length)
mem_mask : Symbol or NDArray or None
    Mask for mem_value. Shape (batch_size, length, mem_length)

Returns
-------
decoder_cell_outputs: list
    Outputs of the decoder cell. Contains:

    - outputs of the transformer decoder cell. Shape (batch_size, length, C_out)
    - additional_outputs of all the transformer decoder cell
[ "Transformer", "Decoder", "Attention", "Cell", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L778-L823
train
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
TransformerDecoder.init_state_from_encoder
def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None):
    """Initialize the state from the encoder outputs.

    Parameters
    ----------
    encoder_outputs : list
    encoder_valid_length : NDArray or None

    Returns
    -------
    decoder_states : list
        The decoder states, includes:

        - mem_value : NDArray
        - mem_masks : NDArray, optional
    """
    mem_value = encoder_outputs
    decoder_states = [mem_value]
    mem_length = mem_value.shape[1]
    if encoder_valid_length is not None:
        dtype = encoder_valid_length.dtype
        ctx = encoder_valid_length.context
        mem_masks = mx.nd.broadcast_lesser(
            mx.nd.arange(mem_length, ctx=ctx, dtype=dtype).reshape((1, -1)),
            encoder_valid_length.reshape((-1, 1)))
        decoder_states.append(mem_masks)
    self._encoder_valid_length = encoder_valid_length
    return decoder_states
python
[ "def", "init_state_from_encoder", "(", "self", ",", "encoder_outputs", ",", "encoder_valid_length", "=", "None", ")", ":", "mem_value", "=", "encoder_outputs", "decoder_states", "=", "[", "mem_value", "]", "mem_length", "=", "mem_value", ".", "shape", "[", "1", ...
Initialize the state from the encoder outputs.

Parameters
----------
encoder_outputs : list
encoder_valid_length : NDArray or None

Returns
-------
decoder_states : list
    The decoder states, includes:

    - mem_value : NDArray
    - mem_masks : NDArray, optional
[ "Initialize", "the", "state", "from", "the", "encoder", "outputs", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L905-L932
train
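The broadcast_lesser call above produces a (batch_size, mem_length) mask that is 1 where a source position is within the sample's valid length. A NumPy sketch with hypothetical lengths:

    import numpy as np

    mem_length = 5
    encoder_valid_length = np.array([3, 5])  # made-up per-sample source lengths
    mem_masks = (np.arange(mem_length).reshape((1, -1))
                 < encoder_valid_length.reshape((-1, 1))).astype(np.float32)
    print(mem_masks)
    # [[1. 1. 1. 0. 0.]
    #  [1. 1. 1. 1. 1.]]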
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
TransformerDecoder.decode_seq
def decode_seq(self, inputs, states, valid_length=None):
    """Decode the decoder inputs. This function is only used for training.

    Parameters
    ----------
    inputs : NDArray, Shape (batch_size, length, C_in)
    states : list of NDArrays or None
        Initial states. The list of decoder states
    valid_length : NDArray or None
        Valid lengths of each sequence. This is usually used when part of sequence
        has been padded. Shape (batch_size,)

    Returns
    -------
    output : NDArray, Shape (batch_size, length, C_out)
    states : list
        The decoder states, includes:

        - mem_value : NDArray
        - mem_masks : NDArray, optional
    additional_outputs : list of list
        Either be an empty list or contains the attention weights in this step.
        The attention weights will have shape (batch_size, length, mem_length) or
        (batch_size, num_heads, length, mem_length)
    """
    batch_size = inputs.shape[0]
    length = inputs.shape[1]
    length_array = mx.nd.arange(length, ctx=inputs.context, dtype=inputs.dtype)
    mask = mx.nd.broadcast_lesser_equal(
        length_array.reshape((1, -1)),
        length_array.reshape((-1, 1)))
    if valid_length is not None:
        arange = mx.nd.arange(length, ctx=valid_length.context,
                              dtype=valid_length.dtype)
        batch_mask = mx.nd.broadcast_lesser(
            arange.reshape((1, -1)),
            valid_length.reshape((-1, 1)))
        mask = mx.nd.broadcast_mul(mx.nd.expand_dims(batch_mask, -1),
                                   mx.nd.expand_dims(mask, 0))
    else:
        mask = mx.nd.broadcast_axes(mx.nd.expand_dims(mask, axis=0),
                                    axis=0, size=batch_size)
    states = [None] + states
    output, states, additional_outputs = self.forward(inputs, states, mask)
    states = states[1:]
    if valid_length is not None:
        output = mx.nd.SequenceMask(output,
                                    sequence_length=valid_length,
                                    use_sequence_length=True,
                                    axis=1)
    return output, states, additional_outputs
python
[ "def", "decode_seq", "(", "self", ",", "inputs", ",", "states", ",", "valid_length", "=", "None", ")", ":", "batch_size", "=", "inputs", ".", "shape", "[", "0", "]", "length", "=", "inputs", ".", "shape", "[", "1", "]", "length_array", "=", "mx", "."...
Decode the decoder inputs. This function is only used for training.

Parameters
----------
inputs : NDArray, Shape (batch_size, length, C_in)
states : list of NDArrays or None
    Initial states. The list of decoder states
valid_length : NDArray or None
    Valid lengths of each sequence. This is usually used when part of sequence has
    been padded. Shape (batch_size,)

Returns
-------
output : NDArray, Shape (batch_size, length, C_out)
states : list
    The decoder states, includes:

    - mem_value : NDArray
    - mem_masks : NDArray, optional
additional_outputs : list of list
    Either be an empty list or contains the attention weights in this step.
    The attention weights will have shape (batch_size, length, mem_length) or
    (batch_size, num_heads, length, mem_length)
[ "Decode", "the", "decoder", "inputs", ".", "This", "function", "is", "only", "used", "for", "training", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L934-L982
train
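decode_seq composes a causal (lower-triangular) self-attention mask with a per-sample valid-length mask. This NumPy sketch mirrors the broadcast_lesser_equal / broadcast_lesser / broadcast_mul logic on toy shapes:

    import numpy as np

    length = 4
    valid_length = np.array([2, 4])  # hypothetical target lengths
    r = np.arange(length)
    causal = (r.reshape((1, -1)) <= r.reshape((-1, 1))).astype(int)  # (length, length)
    batch_mask = (r.reshape((1, -1)) < valid_length.reshape((-1, 1))).astype(int)
    mask = batch_mask[:, :, None] * causal[None, :, :]  # (batch, length, length)
    print(causal)   # position i may attend to positions j <= i
    print(mask[0])  # rows at or beyond valid_length[0] = 2 are zeroed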
dmlc/gluon-nlp
src/gluonnlp/model/transformer.py
ParallelTransformer.forward_backward
def forward_backward(self, x):
    """Perform forward and backward computation for a batch of src seq and dst seq"""
    (src_seq, tgt_seq, src_valid_length, tgt_valid_length), batch_size = x
    with mx.autograd.record():
        out, _ = self._model(src_seq, tgt_seq[:, :-1],
                             src_valid_length, tgt_valid_length - 1)
        smoothed_label = self._label_smoothing(tgt_seq[:, 1:])
        ls = self._loss(out, smoothed_label, tgt_valid_length - 1).sum()
        ls = (ls * (tgt_seq.shape[1] - 1)) / batch_size / self._rescale_loss
    ls.backward()
    return ls
python
[ "def", "forward_backward", "(", "self", ",", "x", ")", ":", "(", "src_seq", ",", "tgt_seq", ",", "src_valid_length", ",", "tgt_valid_length", ")", ",", "batch_size", "=", "x", "with", "mx", ".", "autograd", ".", "record", "(", ")", ":", "out", ",", "_"...
Perform forward and backward computation for a batch of src seq and dst seq
[ "Perform", "forward", "and", "backward", "computation", "for", "a", "batch", "of", "src", "seq", "and", "dst", "seq" ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/model/transformer.py#L1274-L1284
train
dmlc/gluon-nlp
scripts/natural_language_inference/main.py
parse_args
def parse_args():
    """
    Parse arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-id', type=int, default=0,
                        help='GPU id (-1 means CPU)')
    parser.add_argument('--train-file', default='snli_1.0/snli_1.0_train.txt',
                        help='training set file')
    parser.add_argument('--test-file', default='snli_1.0/snli_1.0_dev.txt',
                        help='validation set file')
    parser.add_argument('--max-num-examples', type=int, default=-1,
                        help='maximum number of examples to load (for debugging)')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='batch size')
    parser.add_argument('--print-interval', type=int, default=20,
                        help='the interval of two print')
    parser.add_argument('--mode', choices=['train', 'test'], default='train',
                        help='train or test')
    parser.add_argument('--lr', type=float, default=0.025,
                        help='learning rate')
    parser.add_argument('--epochs', type=int, default=300,
                        help='maximum number of epochs to train')
    parser.add_argument('--embedding', default='glove',
                        help='word embedding type')
    parser.add_argument('--embedding-source', default='glove.840B.300d',
                        help='embedding file source')
    parser.add_argument('--embedding-size', type=int, default=300,
                        help='size of pretrained word embedding')
    parser.add_argument('--hidden-size', type=int, default=200,
                        help='hidden layer size')
    parser.add_argument('--output-dir', default='./output',
                        help='directory for all experiment output')
    parser.add_argument('--model-dir', default='./output',
                        help='directory to load model')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed')
    parser.add_argument('--dropout', type=float, default=0.,
                        help='dropout rate')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='l2 regularization weight')
    parser.add_argument('--intra-attention', action='store_true',
                        help='use intra-sentence attention')
    return parser.parse_args()
python
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'--gpu-id'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "help", "=", "'GPU id (-1 means CPU)'", ")", "parser",...
Parse arguments.
[ "Parse", "arguments", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/natural_language_inference/main.py#L53-L97
train
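For reference, a sample invocation using only flags defined in the parser above (paths assume a working directory containing the unpacked SNLI data):

    python main.py --gpu-id -1 --mode train --batch-size 32 --lr 0.025 --output-dir ./output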
dmlc/gluon-nlp
scripts/natural_language_inference/main.py
train_model
def train_model(model, train_data_loader, val_data_loader, embedding, ctx, args):
    """
    Train model and validate/save every epoch.
    """
    logger.info(vars(args))

    # Initialization
    model.hybridize()
    model.collect_params().initialize(mx.init.Normal(0.01), ctx=ctx)
    model.word_emb.weight.set_data(embedding.idx_to_vec)
    # Fix word embedding
    model.word_emb.weight.grad_req = 'null'

    loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(model.collect_params(), 'adagrad',
                            {'learning_rate': args.lr,
                             'wd': args.weight_decay,
                             'clip_gradient': 5})

    checkpoints_dir = os.path.join(args.output_dir, 'checkpoints')
    if not os.path.exists(checkpoints_dir):
        os.makedirs(checkpoints_dir)

    best_val_acc = 0.
    for epoch_id in range(args.epochs):
        avg_loss = 0.
        avg_acc = 0.
        for batch_id, example in enumerate(train_data_loader):
            s1, s2, label = example
            s1 = s1.as_in_context(ctx)
            s2 = s2.as_in_context(ctx)
            label = label.as_in_context(ctx)

            with autograd.record():
                output = model(s1, s2)
                loss = loss_func(output, label).mean()
            loss.backward()
            trainer.step(1)

            avg_loss += loss.sum().asscalar()

            pred = output.argmax(axis=1)
            acc = (pred == label.astype(np.float32)).mean()
            avg_acc += acc.asscalar()

            if (batch_id + 1) % args.print_interval == 0:
                avg_loss /= args.print_interval
                avg_acc /= args.print_interval
                logger.info('[Epoch {} Batch {}/{}] loss={:.4f}, acc={:.4f}'
                            .format(epoch_id, batch_id + 1, len(train_data_loader),
                                    avg_loss, avg_acc))
                avg_loss = 0.
                avg_acc = 0.

        # Validation
        val_loss, val_acc = test_model(model, val_data_loader, loss_func, ctx)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            checkpoint_path = os.path.join(args.output_dir, 'checkpoints',
                                           'valid_best.params')
            model.save_parameters(checkpoint_path)
        logger.info('[Epoch {}] valid loss={:.4f}, valid acc={:.4f}, best valid acc={:.4f}'
                    .format(epoch_id, val_loss, val_acc, best_val_acc))

    # Save checkpoint of last epoch
    checkpoint_path = os.path.join(args.output_dir, 'checkpoints', 'last.params')
    model.save_parameters(checkpoint_path)
python
[ "def", "train_model", "(", "model", ",", "train_data_loader", ",", "val_data_loader", ",", "embedding", ",", "ctx", ",", "args", ")", ":", "logger", ".", "info", "(", "vars", "(", "args", ")", ")", "# Initialization", "model", ".", "hybridize", "(", ")", ...
Train model and validate/save every epoch.
[ "Train", "model", "and", "validate", "/", "save", "every", "epoch", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/natural_language_inference/main.py#L99-L163
train
dmlc/gluon-nlp
scripts/natural_language_inference/main.py
main
def main(args):
    """
    Entry point: train or test.
    """
    json.dump(vars(args), open(os.path.join(args.output_dir, 'config.json'), 'w'))
    if args.gpu_id == -1:
        ctx = mx.cpu()
    else:
        ctx = mx.gpu(args.gpu_id)
    mx.random.seed(args.seed, ctx=ctx)

    if args.mode == 'train':
        train_dataset = read_dataset(args, 'train_file')
        val_dataset = read_dataset(args, 'test_file')

        vocab_path = os.path.join(args.output_dir, 'vocab.jsons')
        if os.path.exists(vocab_path):
            vocab = nlp.Vocab.from_json(open(vocab_path).read())
        else:
            vocab = build_vocab(train_dataset)
            with open(vocab_path, 'w') as fout:
                fout.write(vocab.to_json())
        glove = nlp.embedding.create(args.embedding, source=args.embedding_source)
        vocab.set_embedding(glove)

        train_data_loader = prepare_data_loader(args, train_dataset, vocab)
        val_data_loader = prepare_data_loader(args, val_dataset, vocab, test=True)

        model = NLIModel(len(vocab), args.embedding_size, args.hidden_size,
                         args.dropout, args.intra_attention)
        train_model(model, train_data_loader, val_data_loader,
                    vocab.embedding, ctx, args)
    elif args.mode == 'test':
        model_args = argparse.Namespace(**json.load(
            open(os.path.join(args.model_dir, 'config.json'))))
        vocab = nlp.Vocab.from_json(
            open(os.path.join(args.model_dir, 'vocab.jsons')).read())
        val_dataset = read_dataset(args, 'test_file')
        val_data_loader = prepare_data_loader(args, val_dataset, vocab, test=True)
        model = NLIModel(len(vocab), model_args.embedding_size,
                         model_args.hidden_size, 0., model_args.intra_attention)
        model.load_parameters(os.path.join(
            args.model_dir, 'checkpoints', 'valid_best.params'), ctx=ctx)
        loss_func = gluon.loss.SoftmaxCrossEntropyLoss()
        logger.info('Test on {}'.format(args.test_file))
        loss, acc = test_model(model, val_data_loader, loss_func, ctx)
        logger.info('loss={:.4f} acc={:.4f}'.format(loss, acc))
python
[ "def", "main", "(", "args", ")", ":", "json", ".", "dump", "(", "vars", "(", "args", ")", ",", "open", "(", "os", ".", "path", ".", "join", "(", "args", ".", "output_dir", ",", "'config.json'", ")", ",", "'w'", ")", ")", "if", "args", ".", "gpu...
Entry point: train or test.
[ "Entry", "point", ":", "train", "or", "test", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/natural_language_inference/main.py#L184-L231
train
dmlc/gluon-nlp
src/gluonnlp/data/candidate_sampler.py
UnigramCandidateSampler.hybrid_forward
def hybrid_forward(self, F, candidates_like, prob, alias):
    # pylint: disable=unused-argument
    """Draw samples from uniform distribution and return sampled candidates.

    Parameters
    ----------
    candidates_like: mxnet.nd.NDArray or mxnet.sym.Symbol
        This input specifies the shape of the to be sampled candidates. # TODO shape
        selection is not yet supported. Shape must be specified in the constructor.

    Returns
    -------
    samples: mxnet.nd.NDArray or mxnet.sym.Symbol
        The sampled candidates of shape candidates_like.shape. Candidates are
        sampled based on the weights specified on creation of the
        UnigramCandidateSampler.
    """
    flat_shape = functools.reduce(operator.mul, self._shape)
    idx = F.random.uniform(low=0, high=self.N, shape=flat_shape,
                           dtype='float64').floor()
    prob = F.gather_nd(prob, idx.reshape((1, -1)))
    alias = F.gather_nd(alias, idx.reshape((1, -1)))
    where = F.random.uniform(shape=flat_shape, dtype='float64') < prob
    hit = idx * where
    alt = alias * (1 - where)
    candidates = (hit + alt).reshape(self._shape)
    return candidates.astype(self._dtype)
python
[ "def", "hybrid_forward", "(", "self", ",", "F", ",", "candidates_like", ",", "prob", ",", "alias", ")", ":", "# pylint: disable=unused-argument", "flat_shape", "=", "functools", ".", "reduce", "(", "operator", ".", "mul", ",", "self", ".", "_shape", ")", "id...
Draw samples from uniform distribution and return sampled candidates.

Parameters
----------
candidates_like: mxnet.nd.NDArray or mxnet.sym.Symbol
    This input specifies the shape of the to be sampled candidates. # TODO shape
    selection is not yet supported. Shape must be specified in the constructor.

Returns
-------
samples: mxnet.nd.NDArray or mxnet.sym.Symbol
    The sampled candidates of shape candidates_like.shape. Candidates are
    sampled based on the weights specified on creation of the
    UnigramCandidateSampler.
[ "Draw", "samples", "from", "uniform", "distribution", "and", "return", "sampled", "candidates", "." ]
4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/candidate_sampler.py#L105-L134
train
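The prob/alias gathers above are the draw step of Walker's alias method. A NumPy sketch of the same step, with illustrative (not actual) tables for a three-outcome unigram distribution:

    import numpy as np

    prob = np.array([0.6, 1.0, 0.9])  # hypothetical acceptance probability per bucket
    alias = np.array([1, 0, 1])       # hypothetical fallback outcome per bucket
    n, n_samples = len(prob), 8

    idx = np.floor(np.random.uniform(0, n, size=n_samples)).astype(int)
    accept = np.random.uniform(size=n_samples) < prob[idx]
    samples = np.where(accept, idx, alias[idx])
    print(samples)  # e.g. [1 0 1 2 1 1 0 2]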
Delgan/loguru
loguru/_logger.py
Logger.add
def add(
    self,
    sink,
    *,
    level=_defaults.LOGURU_LEVEL,
    format=_defaults.LOGURU_FORMAT,
    filter=_defaults.LOGURU_FILTER,
    colorize=_defaults.LOGURU_COLORIZE,
    serialize=_defaults.LOGURU_SERIALIZE,
    backtrace=_defaults.LOGURU_BACKTRACE,
    diagnose=_defaults.LOGURU_DIAGNOSE,
    enqueue=_defaults.LOGURU_ENQUEUE,
    catch=_defaults.LOGURU_CATCH,
    **kwargs
):
    r"""Add a handler sending log messages to a sink adequately configured.

    Parameters
    ----------
    sink : |file-like object|_, |str|, |Path|, |function|_, |Handler| or |class|_
        An object in charge of receiving formatted logging messages and
        propagating them to an appropriate endpoint.
    level : |int| or |str|, optional
        The minimum severity level from which logged messages should be sent
        to the sink.
    format : |str| or |function|_, optional
        The template used to format logged messages before being sent to the
        sink.
    filter : |function|_ or |str|, optional
        A directive used to optionally filter out logged messages before they
        are sent to the sink.
    colorize : |bool|, optional
        Whether or not the color markups contained in the formatted message
        should be converted to ansi codes for terminal coloration, or stripped
        otherwise. If ``None``, the choice is automatically made based on the
        sink being a tty or not.
    serialize : |bool|, optional
        Whether or not the logged message and its records should be first
        converted to a JSON string before being sent to the sink.
    backtrace : |bool|, optional
        Whether or not the formatted exception trace should be extended
        upward, beyond the catching point, to show the full stacktrace which
        generated the error.
    diagnose : |bool|, optional
        Whether or not the exception trace should display the variables values
        to ease debugging. This should be set to ``False`` in production to
        avoid leaking sensitive data.
    enqueue : |bool|, optional
        Whether or not the messages to be logged should first pass through a
        multiprocess-safe queue before reaching the sink. This is useful while
        logging to a file through multiple processes.
    catch : |bool|, optional
        Whether or not errors occurring while sink handles logs messages
        should be caught or not. If ``True``, an exception message is
        displayed on |sys.stderr| but the exception is not propagated to the
        caller, preventing your app from crashing.
    **kwargs
        Additional parameters that will be passed to the sink while creating
        it or while logging messages (the exact behavior depends on the sink
        type).

    If and only if the sink is a file, the following parameters apply:

    Parameters
    ----------
    rotation : |str|, |int|, |time|, |timedelta| or |function|_, optional
        A condition indicating whenever the current logged file should be
        closed and a new one started.
    retention : |str|, |int|, |timedelta| or |function|_, optional
        A directive filtering old files that should be removed during rotation
        or end of program.
    compression : |str| or |function|_, optional
        A compression or archive format to which log files should be converted
        at closure.
    delay : |bool|, optional
        Whether or not the file should be created as soon as the sink is
        configured, or delayed until first logged message. It defaults to
        ``False``.
    mode : |str|, optional
        The opening mode as for built-in |open| function. It defaults to
        ``"a"`` (open the file in appending mode).
    buffering : |int|, optional
        The buffering policy as for built-in |open| function. It defaults to
        ``1`` (line buffered file).
    encoding : |str|, optional
        The file encoding as for built-in |open| function. If ``None``, it
        defaults to ``locale.getpreferredencoding()``.
    **kwargs
        Other parameters are passed to the built-in |open| function.

    Returns
    -------
    :class:`int`
        An identifier associated with the added sink and which should be used
        to |remove| it.

    Notes
    -----
    Extended summary follows.

    .. _sink:

    .. rubric:: The sink parameter

    The ``sink`` handles incoming log messages and proceeds to their writing
    somewhere and somehow. A sink can take many forms:

    - A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``.
      Anything with a ``.write()`` method is considered as a file-like object.
      If it has a ``.flush()`` method, it will be automatically called after
      each logged message. If it has a ``.stop()`` method, it will be
      automatically called at sink termination.
    - A file path as |str| or |Path|. It can be parametrized with some
      additional parameters, see below.
    - A simple |function|_ like ``lambda msg: print(msg)``. This allows for a
      logging procedure entirely defined by user preferences and needs.
    - A built-in |Handler| like ``logging.StreamHandler``. In such a case, the
      `Loguru` records are automatically converted to the structure expected
      by the |logging| module.
    - A |class|_ object that will be used to instantiate the sink using
      ``**kwargs`` attributes passed. Hence the class should instantiate
      objects which are therefore valid sinks.

    Note that you should avoid using the ``logger`` inside any of your sinks
    as this would result in infinite recursion or dead lock if the module's
    sink was not explicitly disabled.

    .. _message:

    .. rubric:: The logged message

    The logged message passed to all added sinks is nothing more than a string
    of the formatted log, to which a special attribute is associated: the
    ``.record`` which is a dict containing all contextual information possibly
    needed (see below).

    Logged messages are formatted according to the ``format`` of the added
    sink. This format is usually a string containing braces fields to display
    attributes from the record dict.

    If fine-grained control is needed, the ``format`` can also be a function
    which takes the record as parameter and returns the format template
    string. However, note that in such a case, you should take care of
    appending the line ending and exception field to the returned format,
    while ``"\n{exception}"`` is automatically appended for convenience if
    ``format`` is a string.

    The ``filter`` attribute can be used to control which messages are
    effectively passed to the sink and which ones are ignored. A function can
    be used, accepting the record as an argument, and returning ``True`` if
    the message should be logged, ``False`` otherwise. If a string is used,
    only the records with the same ``name`` and its children will be allowed.

    .. _levels:

    .. rubric:: The severity levels

    Each logged message is associated with a severity level. These levels make
    it possible to prioritize messages and to choose the verbosity of the logs
    according to usages. For example, they allow displaying some debugging
    information to a developer, while hiding it from the end user running the
    application.

    The ``level`` attribute of every added sink controls the minimum threshold
    from which log messages are allowed to be emitted. While using the
    ``logger``, you are in charge of configuring the appropriate granularity
    of your logs. It is possible to add even more custom levels by using the
    |level| method.

    Here are the standard levels with their default severity value; each one
    is associated with a logging method of the same name:

    +----------------------+------------------------+------------------------+
    | Level name           | Severity value         | Logger method          |
    +======================+========================+========================+
    | ``TRACE``            | 5                      | |logger.trace|         |
    +----------------------+------------------------+------------------------+
    | ``DEBUG``            | 10                     | |logger.debug|         |
    +----------------------+------------------------+------------------------+
    | ``INFO``             | 20                     | |logger.info|          |
    +----------------------+------------------------+------------------------+
    | ``SUCCESS``          | 25                     | |logger.success|       |
    +----------------------+------------------------+------------------------+
    | ``WARNING``          | 30                     | |logger.warning|       |
    +----------------------+------------------------+------------------------+
    | ``ERROR``            | 40                     | |logger.error|         |
    +----------------------+------------------------+------------------------+
    | ``CRITICAL``         | 50                     | |logger.critical|      |
    +----------------------+------------------------+------------------------+

    .. _record:

    .. rubric:: The record dict

    The record is just a Python dict, accessible from sinks by
    ``message.record``. It contains all contextual information of the logging
    call (time, function, file, line, level, etc.).

    Each of its keys can be used in the handler's ``format`` so the
    corresponding value is properly displayed in the logged message (e.g.
    ``"{level}"`` -> ``"INFO"``). Some of the record's values are objects with
    two or more attributes; those can be formatted with ``"{key.attr}"``
    (``"{key}"`` would display one by default). Formatting directives like
    ``"{key: >3}"`` also work and are particularly useful for time (see
    below).

    +------------+---------------------------------+----------------------------+
    | Key        | Description                     | Attributes                 |
    +============+=================================+============================+
    | elapsed    | The time elapsed since the      | See |timedelta|            |
    |            | start of the program            |                            |
    +------------+---------------------------------+----------------------------+
    | exception  | The formatted exception if any, | ``type``, ``value``,       |
    |            | ``None`` otherwise              | ``traceback``              |
    +------------+---------------------------------+----------------------------+
    | extra      | The dict of attributes          | None                       |
    |            | bound by the user (see |bind|)  |                            |
    +------------+---------------------------------+----------------------------+
    | file       | The file where the logging call | ``name`` (default),        |
    |            | was made                        | ``path``                   |
    +------------+---------------------------------+----------------------------+
    | function   | The function from which the     | None                       |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | level      | The severity used to log the    | ``name`` (default),        |
    |            | message                         | ``no``, ``icon``           |
    +------------+---------------------------------+----------------------------+
    | line       | The line number in the source   | None                       |
    |            | code                            |                            |
    +------------+---------------------------------+----------------------------+
    | message    | The logged message (not yet     | None                       |
    |            | formatted)                      |                            |
    +------------+---------------------------------+----------------------------+
    | module     | The module where the logging    | None                       |
    |            | call was made                   |                            |
    +------------+---------------------------------+----------------------------+
    | name       | The ``__name__`` where the      | None                       |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | process    | The process in which the        | ``name``, ``id`` (default) |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | thread     | The thread in which the         | ``name``, ``id`` (default) |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+
    | time       | The aware local time when the   | See |datetime|             |
    |            | logging call was made           |                            |
    +------------+---------------------------------+----------------------------+

    .. _time:

    .. rubric:: The time formatting

    To use your favorite time representation, you can set it directly in the
    time formatter specifier of your handler format, like for example
    ``format="{time:HH:mm:ss} {message}"``. Note that this datetime represents
    your local time, and it is also made timezone-aware, so you can display
    the UTC offset to avoid ambiguities.

    The time field can be formatted using more human-friendly tokens. Those
    constitute a subset of the ones used by the `Pendulum`_ library of
    `@sdispater`_. To escape a token, just add square brackets around it, for
    example ``"[YY]"`` would display literally ``"YY"``.

    If no time formatter specifier is used, like for example if
    ``format="{time} {message}"``, the default one will use ISO 8601.

    +------------------------+---------+----------------------------------------+
    |                        | Token   | Output                                 |
    +========================+=========+========================================+
    | Year                   | YYYY    | 2000, 2001, 2002 ... 2012, 2013        |
    |                        +---------+----------------------------------------+
    |                        | YY      | 00, 01, 02 ... 12, 13                  |
    +------------------------+---------+----------------------------------------+
    | Quarter                | Q       | 1 2 3 4                                |
    +------------------------+---------+----------------------------------------+
    | Month                  | MMMM    | January, February, March ...           |
    |                        +---------+----------------------------------------+
    |                        | MMM     | Jan, Feb, Mar ...                      |
    |                        +---------+----------------------------------------+
    |                        | MM      | 01, 02, 03 ... 11, 12                  |
    |                        +---------+----------------------------------------+
    |                        | M       | 1, 2, 3 ... 11, 12                     |
    +------------------------+---------+----------------------------------------+
    | Day of Year            | DDDD    | 001, 002, 003 ... 364, 365             |
    |                        +---------+----------------------------------------+
    |                        | DDD     | 1, 2, 3 ... 364, 365                   |
    +------------------------+---------+----------------------------------------+
    | Day of Month           | DD      | 01, 02, 03 ... 30, 31                  |
    |                        +---------+----------------------------------------+
    |                        | D       | 1, 2, 3 ... 30, 31                     |
    +------------------------+---------+----------------------------------------+
    | Day of Week            | dddd    | Monday, Tuesday, Wednesday ...         |
    |                        +---------+----------------------------------------+
    |                        | ddd     | Mon, Tue, Wed ...                      |
    |                        +---------+----------------------------------------+
    |                        | d       | 0, 1, 2 ... 6                          |
    +------------------------+---------+----------------------------------------+
    | Days of ISO Week       | E       | 1, 2, 3 ... 7                          |
    +------------------------+---------+----------------------------------------+
    | Hour                   | HH      | 00, 01, 02 ... 23, 24                  |
    |                        +---------+----------------------------------------+
    |                        | H       | 0, 1, 2 ... 23, 24                     |
    |                        +---------+----------------------------------------+
    |                        | hh      | 01, 02, 03 ... 11, 12                  |
    |                        +---------+----------------------------------------+
    |                        | h       | 1, 2, 3 ... 11, 12                     |
    +------------------------+---------+----------------------------------------+
    | Minute                 | mm      | 00, 01, 02 ... 58, 59                  |
    |                        +---------+----------------------------------------+
    |                        | m       | 0, 1, 2 ...
58, 59 | +------------------------+---------+----------------------------------------+ | Second | ss | 00, 01, 02 ... 58, 59 | | +---------+----------------------------------------+ | | s | 0, 1, 2 ... 58, 59 | +------------------------+---------+----------------------------------------+ | Fractional Second | S | 0 1 ... 8 9 | | +---------+----------------------------------------+ | | SS | 00, 01, 02 ... 98, 99 | | +---------+----------------------------------------+ | | SSS | 000 001 ... 998 999 | | +---------+----------------------------------------+ | | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]| | +---------+----------------------------------------+ | | SSSSSS | 000000 000001 ... 999998 999999 | +------------------------+---------+----------------------------------------+ | AM / PM | A | AM, PM | +------------------------+---------+----------------------------------------+ | Timezone | Z | -07:00, -06:00 ... +06:00, +07:00 | | +---------+----------------------------------------+ | | ZZ | -0700, -0600 ... +0600, +0700 | | +---------+----------------------------------------+ | | zz | EST CST ... MST PST | +------------------------+---------+----------------------------------------+ | Seconds timestamp | X | 1381685817, 1234567890.123 | +------------------------+---------+----------------------------------------+ | Microseconds timestamp | x | 1234567890123 | +------------------------+---------+----------------------------------------+ .. _file: .. rubric:: The file sinks If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs. The path can also contain a special ``"{time}"`` field that will be formatted with the current date at file creation. The ``rotation`` check is made before logging each message. If there is already an existing file with the same name that the file to be created, then the existing file is renamed by appending the date to its basename to prevent file overwriting. This parameter accepts: - an |int| which corresponds to the maximum file size in bytes before that the current logged file is closed and a new one started over. - a |timedelta| which indicates the frequency of each new rotation. - a |time| which specifies the hour when the daily rotation should occur. - a |str| for human-friendly parametrization of one of the previously enumerated types. Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``, ``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ... - a |function|_ which will be called before logging. It should accept two arguments: the logged message and the file object, and it should return ``True`` if the rotation should happen now, ``False`` otherwise. The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are selected according to their basename, if it is the same that the sink file, with possible time field being replaced with ``.*``. This parameter accepts: - an |int| which indicates the number of log files to keep, while older files are removed. - a |timedelta| which specifies the maximum age of files to keep. - a |str| for human-friendly parametrization of the maximum age of files to keep. Examples: ``"1 week, 3 days"``, ``"2 months"``, ... - a |function|_ which will be called before the retention process. It should accept the list of log files as argument and process to whatever it wants (moving files, removing them, etc.). The ``compression`` happens at rotation or at sink stop if rotation is ``None``. 
This parameter accepts: - a |str| which corresponds to the compressed or archived file extension. This can be one of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``, ``"tar.xz"``, ``"zip"``. - a |function|_ which will be called before file termination. It should accept the path of the log file as argument and process to whatever it wants (custom compression, network sending, removing it, etc.). .. _color: .. rubric:: The color markups To add colors to your logs, you just have to enclose your format string with the appropriate tags. This is based on the great `ansimarkup`_ library from `@gvalkov`_. Those tags are removed if the sink don't support ansi codes. The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to the configured color of the logged message level. Here are the available tags (note that compatibility may vary depending on terminal): +------------------------------------+--------------------------------------+ | Color (abbr) | Styles (abbr) | +====================================+======================================+ | Black (k) | Bold (b) | +------------------------------------+--------------------------------------+ | Blue (e) | Dim (d) | +------------------------------------+--------------------------------------+ | Cyan (c) | Normal (n) | +------------------------------------+--------------------------------------+ | Green (g) | Italic (i) | +------------------------------------+--------------------------------------+ | Magenta (m) | Underline (u) | +------------------------------------+--------------------------------------+ | Red (r) | Strike (s) | +------------------------------------+--------------------------------------+ | White (w) | Reverse (r) | +------------------------------------+--------------------------------------+ | Yellow (y) | Blink (l) | +------------------------------------+--------------------------------------+ | | Hide (h) | +------------------------------------+--------------------------------------+ Usage: +-----------------+-------------------------------------------------------------------+ | Description | Examples | | +---------------------------------+---------------------------------+ | | Foreground | Background | +=================+=================================+=================================+ | Basic colors | ``<red>``, ``<r>`` | ``<GREEN>``, ``<G>`` | +-----------------+---------------------------------+---------------------------------+ | Light colors | ``<light-blue>``, ``<le>`` | ``<LIGHT-CYAN>``, ``<LC>`` | +-----------------+---------------------------------+---------------------------------+ | Xterm colors | ``<fg 86>``, ``<fg 255>`` | ``<bg 42>``, ``<bg 9>`` | +-----------------+---------------------------------+---------------------------------+ | Hex colors | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` | +-----------------+---------------------------------+---------------------------------+ | RGB colors | ``<fg 0,95,0>`` | ``<bg 72,119,65>`` | +-----------------+---------------------------------+---------------------------------+ | Stylizing | ``<bold>``, ``<b>`` , ``<underline>``, ``<u>`` | +-----------------+-------------------------------------------------------------------+ | Shorthand | ``<red, yellow>``, ``<r, y>`` | | (FG, BG) | | +-----------------+-------------------------------------------------------------------+ | Shorthand | ``<bold, cyan, white>``, ``<b,,w>``, ``<b,c,>`` | | (Style, FG, BG) | | 
+-----------------+-------------------------------------------------------------------+ .. _env: .. rubric:: The environment variables The default values of sink parameters can be entirely customized. This is particularly useful if you don't like the log format of the pre-configured sink. Each of the |add| default parameters can be modified by setting the ``LOGURU_[PARAM]`` environment variable. For example, on Linux: ``export LOGURU_FORMAT="{time} - {message}"`` or ``export LOGURU_DIAGNOSE=NO``. The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]`` environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"`` or ``setx LOGURU_TRACE_ICON "🚀"``. If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT`` variable to ``False``. On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On Windows, don't forget to restart your terminal for the change to be taken into account. Examples -------- >>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module") >>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB") >>> def my_sink(message): ... record = message.record ... update_db(message, time=record.time, level=record.level) ... >>> logger.add(my_sink) >>> from logging import StreamHandler >>> logger.add(StreamHandler(sys.stderr), format="{message}") >>> class RandomStream: ... def __init__(self, seed, threshold): ... self.threshold = threshold ... random.seed(seed) ... def write(self, message): ... if random.random() > self.threshold: ... print(message) ... >>> stream_object = RandomStream(seed=12345, threshold=0.25) >>> logger.add(stream_object, level="INFO") >>> logger.add(RandomStream, level="DEBUG", seed=34567, threshold=0.5) """ if colorize is None and serialize: colorize = False if isclass(sink): sink = sink(**kwargs) return self.add( sink, level=level, format=format, filter=filter, colorize=colorize, serialize=serialize, backtrace=backtrace, diagnose=diagnose, enqueue=enqueue, catch=catch, ) elif isinstance(sink, (str, PathLike)): path = sink sink = FileSink(path, **kwargs) return self.add( sink, level=level, format=format, filter=filter, colorize=colorize, serialize=serialize, backtrace=backtrace, diagnose=diagnose, enqueue=enqueue, catch=catch, ) elif hasattr(sink, "write") and callable(sink.write): if colorize is False: stream = sink else: try: converter = AnsiToWin32(sink, convert=None, strip=False) isatty = converter.stream.isatty() except Exception: if colorize is None: colorize = False stream = sink else: if colorize is None: colorize = isatty if converter.should_wrap() and colorize: stream = converter.stream else: stream = sink stream_write = stream.write if kwargs: def write(m): return stream_write(m, **kwargs) else: write = stream_write if hasattr(stream, "flush") and callable(stream.flush): stream_flush = stream.flush def writer(m): write(m) stream_flush() else: writer = write if hasattr(stream, "stop") and callable(stream.stop): stopper = stream.stop else: def stopper(): return None elif isinstance(sink, logging.Handler): def writer(m): message = str(m) r = m.record exc = r["exception"] if not is_formatter_dynamic: message = message[:-1] record = logging.root.makeRecord( r["name"], r["level"].no, r["file"].path, r["line"], message, (), (exc.type, exc.value, exc.traceback) if exc else None, r["function"], r["extra"], **kwargs ) if exc: record.exc_text = "\n" sink.handle(record) stopper = sink.close if colorize is None: colorize = 
False elif callable(sink): if kwargs: def writer(m): return sink(m, **kwargs) else: writer = sink def stopper(): return None if colorize is None: colorize = False else: raise ValueError("Cannot log to objects of type '%s'." % type(sink).__name__) if filter is None or filter == "": filter_func = None elif isinstance(filter, str): parent = filter + "." length = len(parent) def filter_func(r): return (r["name"] + ".")[:length] == parent elif callable(filter): filter_func = filter else: raise ValueError( "Invalid filter, it should be a function or a string, not: '%s'" % type(filter).__name__ ) if isinstance(level, str): levelno = self.level(level).no elif isinstance(level, int): levelno = level else: raise ValueError( "Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__ ) if levelno < 0: raise ValueError( "Invalid level value, it should be a positive integer, not: %d" % levelno ) if isinstance(format, str): formatter = format + "\n{exception}" is_formatter_dynamic = False elif callable(format): formatter = format is_formatter_dynamic = True else: raise ValueError( "Invalid format, it should be a string or a function, not: '%s'" % type(format).__name__ ) try: encoding = sink.encoding except AttributeError: encoding = None if not encoding: encoding = "ascii" with self._lock: handler_id = next(self._handlers_count) colors = [lvl.color for lvl in self._levels.values()] + [""] exception_formatter = ExceptionFormatter( colorize=colorize, encoding=encoding, diagnose=diagnose, backtrace=backtrace, hidden_frames_filename=self.catch.__code__.co_filename, ) handler = Handler( writer=writer, stopper=stopper, levelno=levelno, formatter=formatter, is_formatter_dynamic=is_formatter_dynamic, filter_=filter_func, colorize=colorize, serialize=serialize, catch=catch, enqueue=enqueue, id_=handler_id, exception_formatter=exception_formatter, colors=colors, ) handlers = self._handlers.copy() handlers[handler_id] = handler self.__class__._min_level = min(self.__class__._min_level, levelno) self.__class__._handlers = handlers return handler_id
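As an illustrative usage sketch of the file-sink parameters documented in the function above (the file name and thresholds are arbitrary):

>>> handler_id = logger.add("app_{time}.log", rotation="50 MB", retention="10 days", compression="zip")
>>> logger.info("An event worth keeping")
>>> logger.remove(handler_id)  # detaches and stops the sink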
python
def add( self, sink, *, level=_defaults.LOGURU_LEVEL, format=_defaults.LOGURU_FORMAT, filter=_defaults.LOGURU_FILTER, colorize=_defaults.LOGURU_COLORIZE, serialize=_defaults.LOGURU_SERIALIZE, backtrace=_defaults.LOGURU_BACKTRACE, diagnose=_defaults.LOGURU_DIAGNOSE, enqueue=_defaults.LOGURU_ENQUEUE, catch=_defaults.LOGURU_CATCH, **kwargs ): r"""Add a handler sending log messages to a sink adequately configured. Parameters ---------- sink : |file-like object|_, |str|, |Path|, |function|_, |Handler| or |class|_ An object in charge of receiving formatted logging messages and propagating them to an appropriate endpoint. level : |int| or |str|, optional The minimum severity level from which logged messages should be sent to the sink. format : |str| or |function|_, optional The template used to format logged messages before being sent to the sink. filter : |function|_ or |str|, optional A directive used to optionally filter out logged messages before they are sent to the sink. colorize : |bool|, optional Whether or not the color markups contained in the formatted message should be converted to ansi codes for terminal coloration, or stripped otherwise. If ``None``, the choice is automatically made based on the sink being a tty or not. serialize : |bool|, optional Whether or not the logged message and its records should be first converted to a JSON string before being sent to the sink. backtrace : |bool|, optional Whether or not the exception trace formatted should be extended upward, beyond the catching point, to show the full stacktrace which generated the error. diagnose : |bool|, optional Whether or not the exception trace should display the variables values to eases the debugging. This should be set to ``False`` in production to avoid leaking sensitive data. enqueue : |bool|, optional Whether or not the messages to be logged should first pass through a multiprocess-safe queue before reaching the sink. This is useful while logging to a file through multiple processes. catch : |bool|, optional Whether or not errors occurring while sink handles logs messages should be caught or not. If ``True``, an exception message is displayed on |sys.stderr| but the exception is not propagated to the caller, preventing your app to crash. **kwargs Additional parameters that will be passed to the sink while creating it or while logging messages (the exact behavior depends on the sink type). If and only if the sink is a file, the following parameters apply: Parameters ---------- rotation : |str|, |int|, |time|, |timedelta| or |function|_, optional A condition indicating whenever the current logged file should be closed and a new one started. retention : |str|, |int|, |timedelta| or |function|_, optional A directive filtering old files that should be removed during rotation or end of program. compression : |str| or |function|_, optional A compression or archive format to which log files should be converted at closure. delay : |bool|, optional Whether or not the file should be created as soon as the sink is configured, or delayed until first logged message. It defaults to ``False``. mode : |str|, optional The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the file in appending mode). buffering : |int|, optional The buffering policy as for built-in |open| function. It defaults to ``1`` (line buffered file). encoding : |str|, optional The file encoding as for built-in |open| function. If ``None``, it defaults to ``locale.getpreferredencoding()``. 
**kwargs Others parameters are passed to the built-in |open| function. Returns ------- :class:`int` An identifier associated with the added sink and which should be used to |remove| it. Notes ----- Extended summary follows. .. _sink: .. rubric:: The sink parameter The ``sink`` handles incoming log messages and proceed to their writing somewhere and somehow. A sink can take many forms: - A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with a ``.write()`` method is considered as a file-like object. If it has a ``.flush()`` method, it will be automatically called after each logged message. If it has a ``.stop()`` method, it will be automatically called at sink termination. - A file path as |str| or |Path|. It can be parametrized with some additional parameters, see below. - A simple |function|_ like ``lambda msg: print(msg)``. This allows for logging procedure entirely defined by user preferences and needs. - A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records are automatically converted to the structure expected by the |logging| module. - A |class|_ object that will be used to instantiate the sink using ``**kwargs`` attributes passed. Hence the class should instantiate objects which are therefore valid sinks. Note that you should avoid using the ``logger`` inside any of your sinks as this would result in infinite recursion or dead lock if the module's sink was not explicitly disabled. .. _message: .. rubric:: The logged message The logged message passed to all added sinks is nothing more than a string of the formatted log, to which a special attribute is associated: the ``.record`` which is a dict containing all contextual information possibly needed (see below). Logged messages are formatted according to the ``format`` of the added sink. This format is usually a string containing braces fields to display attributes from the record dict. If fine-grained control is needed, the ``format`` can also be a function which takes the record as parameter and return the format template string. However, note that in such a case, you should take care of appending the line ending and exception field to the returned format, while ``"\n{exception}"`` is automatically appended for convenience if ``format`` is a string. The ``filter`` attribute can be used to control which messages are effectively passed to the sink and which one are ignored. A function can be used, accepting the record as an argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If a string is used, only the records with the same ``name`` and its children will be allowed. .. _levels: .. rubric:: The severity levels Each logged message is associated with a severity level. These levels make it possible to prioritize messages and to choose the verbosity of the logs according to usages. For example, it allows to display some debugging information to a developer, while hiding it to the end user running the application. The ``level`` attribute of every added sink controls the minimum threshold from which log messages are allowed to be emitted. While using the ``logger``, you are in charge of configuring the appropriate granularity of your logs. It is possible to add even more custom levels by using the |level| method. 
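As a minimal sketch of how per-sink ``level`` thresholds interact (the sink targets here are arbitrary):

>>> logger.add(sys.stderr, level="WARNING")
>>> logger.add("debug.log", level="DEBUG")
>>> logger.debug("Reaches debug.log only")
>>> logger.warning("Reaches both sinks")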
Here are the standard levels with their default severity value, each one is associated with a logging method of the same name: +----------------------+------------------------+------------------------+ | Level name | Severity value | Logger method | +======================+========================+========================+ | ``TRACE`` | 5 | |logger.trace| | +----------------------+------------------------+------------------------+ | ``DEBUG`` | 10 | |logger.debug| | +----------------------+------------------------+------------------------+ | ``INFO`` | 20 | |logger.info| | +----------------------+------------------------+------------------------+ | ``SUCCESS`` | 25 | |logger.success| | +----------------------+------------------------+------------------------+ | ``WARNING`` | 30 | |logger.warning| | +----------------------+------------------------+------------------------+ | ``ERROR`` | 40 | |logger.error| | +----------------------+------------------------+------------------------+ | ``CRITICAL`` | 50 | |logger.critical| | +----------------------+------------------------+------------------------+ .. _record: .. rubric:: The record dict The record is just a Python dict, accessible from sinks by ``message.record``. It contains all contextual information of the logging call (time, function, file, line, level, etc.). Each of its key can be used in the handler's ``format`` so the corresponding value is properly displayed in the logged message (eg. ``"{level}"`` -> ``"INFO"``). Some record's values are objects with two or more attributes, those can be formatted with ``"{key.attr}"`` (``"{key}"`` would display one by default). Formatting directives like ``"{key: >3}"`` also works and is particularly useful for time (see below). +------------+---------------------------------+----------------------------+ | Key | Description | Attributes | +============+=================================+============================+ | elapsed | The time elapsed since the | See |timedelta| | | | start of the program | | +------------+---------------------------------+----------------------------+ | exception | The formatted exception if any, | ``type``, ``value``, | | | ``None`` otherwise | ``traceback`` | +------------+---------------------------------+----------------------------+ | extra | The dict of attributes | None | | | bound by the user (see |bind|) | | +------------+---------------------------------+----------------------------+ | file | The file where the logging call | ``name`` (default), | | | was made | ``path`` | +------------+---------------------------------+----------------------------+ | function | The function from which the | None | | | logging call was made | | +------------+---------------------------------+----------------------------+ | level | The severity used to log the | ``name`` (default), | | | the message | ``no``, ``icon`` | +------------+---------------------------------+----------------------------+ | line | The line number in the source | None | | | code | | +------------+---------------------------------+----------------------------+ | message | The logged message (not yet | None | | | formatted) | | +------------+---------------------------------+----------------------------+ | module | The module where the logging | None | | | call was made | | +------------+---------------------------------+----------------------------+ | name | The ``__name__`` where the | None | | | logging call was made | | +------------+---------------------------------+----------------------------+ | 
process | The process in which the | ``name``, ``id`` (default) | | | logging call was made | | +------------+---------------------------------+----------------------------+ | thread | The thread in which the | ``name``, ``id`` (default) | | | logging call was made | | +------------+---------------------------------+----------------------------+ | time | The aware local time when the | See |datetime| | | | logging call was made | | +------------+---------------------------------+----------------------------+ .. _time: .. rubric:: The time formatting To use your favorite time representation, you can set it directly in the time formatter specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``. Note that this datetime represents your local time, and it is also made timezone-aware, so you can display the UTC offset to avoid ambiguities. The time field can be formatted using more human-friendly tokens. Those constitute a subset of the one used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``. If no time formatter specifier is used, like for example if ``format="{time} {message}"``, the default one will use ISO 8601. +------------------------+---------+----------------------------------------+ | | Token | Output | +========================+=========+========================================+ | Year | YYYY | 2000, 2001, 2002 ... 2012, 2013 | | +---------+----------------------------------------+ | | YY | 00, 01, 02 ... 12, 13 | +------------------------+---------+----------------------------------------+ | Quarter | Q | 1 2 3 4 | +------------------------+---------+----------------------------------------+ | Month | MMMM | January, February, March ... | | +---------+----------------------------------------+ | | MMM | Jan, Feb, Mar ... | | +---------+----------------------------------------+ | | MM | 01, 02, 03 ... 11, 12 | | +---------+----------------------------------------+ | | M | 1, 2, 3 ... 11, 12 | +------------------------+---------+----------------------------------------+ | Day of Year | DDDD | 001, 002, 003 ... 364, 365 | | +---------+----------------------------------------+ | | DDD | 1, 2, 3 ... 364, 365 | +------------------------+---------+----------------------------------------+ | Day of Month | DD | 01, 02, 03 ... 30, 31 | | +---------+----------------------------------------+ | | D | 1, 2, 3 ... 30, 31 | +------------------------+---------+----------------------------------------+ | Day of Week | dddd | Monday, Tuesday, Wednesday ... | | +---------+----------------------------------------+ | | ddd | Mon, Tue, Wed ... | | +---------+----------------------------------------+ | | d | 0, 1, 2 ... 6 | +------------------------+---------+----------------------------------------+ | Days of ISO Week | E | 1, 2, 3 ... 7 | +------------------------+---------+----------------------------------------+ | Hour | HH | 00, 01, 02 ... 23, 24 | | +---------+----------------------------------------+ | | H | 0, 1, 2 ... 23, 24 | | +---------+----------------------------------------+ | | hh | 01, 02, 03 ... 11, 12 | | +---------+----------------------------------------+ | | h | 1, 2, 3 ... 11, 12 | +------------------------+---------+----------------------------------------+ | Minute | mm | 00, 01, 02 ... 58, 59 | | +---------+----------------------------------------+ | | m | 0, 1, 2 ... 
58, 59 | +------------------------+---------+----------------------------------------+ | Second | ss | 00, 01, 02 ... 58, 59 | | +---------+----------------------------------------+ | | s | 0, 1, 2 ... 58, 59 | +------------------------+---------+----------------------------------------+ | Fractional Second | S | 0 1 ... 8 9 | | +---------+----------------------------------------+ | | SS | 00, 01, 02 ... 98, 99 | | +---------+----------------------------------------+ | | SSS | 000 001 ... 998 999 | | +---------+----------------------------------------+ | | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]| | +---------+----------------------------------------+ | | SSSSSS | 000000 000001 ... 999998 999999 | +------------------------+---------+----------------------------------------+ | AM / PM | A | AM, PM | +------------------------+---------+----------------------------------------+ | Timezone | Z | -07:00, -06:00 ... +06:00, +07:00 | | +---------+----------------------------------------+ | | ZZ | -0700, -0600 ... +0600, +0700 | | +---------+----------------------------------------+ | | zz | EST CST ... MST PST | +------------------------+---------+----------------------------------------+ | Seconds timestamp | X | 1381685817, 1234567890.123 | +------------------------+---------+----------------------------------------+ | Microseconds timestamp | x | 1234567890123 | +------------------------+---------+----------------------------------------+ .. _file: .. rubric:: The file sinks If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs. The path can also contain a special ``"{time}"`` field that will be formatted with the current date at file creation. The ``rotation`` check is made before logging each message. If there is already an existing file with the same name that the file to be created, then the existing file is renamed by appending the date to its basename to prevent file overwriting. This parameter accepts: - an |int| which corresponds to the maximum file size in bytes before that the current logged file is closed and a new one started over. - a |timedelta| which indicates the frequency of each new rotation. - a |time| which specifies the hour when the daily rotation should occur. - a |str| for human-friendly parametrization of one of the previously enumerated types. Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``, ``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ... - a |function|_ which will be called before logging. It should accept two arguments: the logged message and the file object, and it should return ``True`` if the rotation should happen now, ``False`` otherwise. The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are selected according to their basename, if it is the same that the sink file, with possible time field being replaced with ``.*``. This parameter accepts: - an |int| which indicates the number of log files to keep, while older files are removed. - a |timedelta| which specifies the maximum age of files to keep. - a |str| for human-friendly parametrization of the maximum age of files to keep. Examples: ``"1 week, 3 days"``, ``"2 months"``, ... - a |function|_ which will be called before the retention process. It should accept the list of log files as argument and process to whatever it wants (moving files, removing them, etc.). The ``compression`` happens at rotation or at sink stop if rotation is ``None``. 
This parameter accepts: - a |str| which corresponds to the compressed or archived file extension. This can be one of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``, ``"tar.xz"``, ``"zip"``. - a |function|_ which will be called before file termination. It should accept the path of the log file as argument and process to whatever it wants (custom compression, network sending, removing it, etc.). .. _color: .. rubric:: The color markups To add colors to your logs, you just have to enclose your format string with the appropriate tags. This is based on the great `ansimarkup`_ library from `@gvalkov`_. Those tags are removed if the sink don't support ansi codes. The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to the configured color of the logged message level. Here are the available tags (note that compatibility may vary depending on terminal): +------------------------------------+--------------------------------------+ | Color (abbr) | Styles (abbr) | +====================================+======================================+ | Black (k) | Bold (b) | +------------------------------------+--------------------------------------+ | Blue (e) | Dim (d) | +------------------------------------+--------------------------------------+ | Cyan (c) | Normal (n) | +------------------------------------+--------------------------------------+ | Green (g) | Italic (i) | +------------------------------------+--------------------------------------+ | Magenta (m) | Underline (u) | +------------------------------------+--------------------------------------+ | Red (r) | Strike (s) | +------------------------------------+--------------------------------------+ | White (w) | Reverse (r) | +------------------------------------+--------------------------------------+ | Yellow (y) | Blink (l) | +------------------------------------+--------------------------------------+ | | Hide (h) | +------------------------------------+--------------------------------------+ Usage: +-----------------+-------------------------------------------------------------------+ | Description | Examples | | +---------------------------------+---------------------------------+ | | Foreground | Background | +=================+=================================+=================================+ | Basic colors | ``<red>``, ``<r>`` | ``<GREEN>``, ``<G>`` | +-----------------+---------------------------------+---------------------------------+ | Light colors | ``<light-blue>``, ``<le>`` | ``<LIGHT-CYAN>``, ``<LC>`` | +-----------------+---------------------------------+---------------------------------+ | Xterm colors | ``<fg 86>``, ``<fg 255>`` | ``<bg 42>``, ``<bg 9>`` | +-----------------+---------------------------------+---------------------------------+ | Hex colors | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` | +-----------------+---------------------------------+---------------------------------+ | RGB colors | ``<fg 0,95,0>`` | ``<bg 72,119,65>`` | +-----------------+---------------------------------+---------------------------------+ | Stylizing | ``<bold>``, ``<b>`` , ``<underline>``, ``<u>`` | +-----------------+-------------------------------------------------------------------+ | Shorthand | ``<red, yellow>``, ``<r, y>`` | | (FG, BG) | | +-----------------+-------------------------------------------------------------------+ | Shorthand | ``<bold, cyan, white>``, ``<b,,w>``, ``<b,c,>`` | | (Style, FG, BG) | | 
+-----------------+-------------------------------------------------------------------+ .. _env: .. rubric:: The environment variables The default values of sink parameters can be entirely customized. This is particularly useful if you don't like the log format of the pre-configured sink. Each of the |add| default parameters can be modified by setting the ``LOGURU_[PARAM]`` environment variable. For example, on Linux: ``export LOGURU_FORMAT="{time} - {message}"`` or ``export LOGURU_DIAGNOSE=NO``. The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]`` environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"`` or ``setx LOGURU_TRACE_ICON "🚀"``. If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT`` variable to ``False``. On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On Windows, don't forget to restart your terminal for the change to be taken into account. Examples -------- >>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module") >>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB") >>> def my_sink(message): ... record = message.record ... update_db(message, time=record.time, level=record.level) ... >>> logger.add(my_sink) >>> from logging import StreamHandler >>> logger.add(StreamHandler(sys.stderr), format="{message}") >>> class RandomStream: ... def __init__(self, seed, threshold): ... self.threshold = threshold ... random.seed(seed) ... def write(self, message): ... if random.random() > self.threshold: ... print(message) ... >>> stream_object = RandomStream(seed=12345, threshold=0.25) >>> logger.add(stream_object, level="INFO") >>> logger.add(RandomStream, level="DEBUG", seed=34567, threshold=0.5) """ if colorize is None and serialize: colorize = False if isclass(sink): sink = sink(**kwargs) return self.add( sink, level=level, format=format, filter=filter, colorize=colorize, serialize=serialize, backtrace=backtrace, diagnose=diagnose, enqueue=enqueue, catch=catch, ) elif isinstance(sink, (str, PathLike)): path = sink sink = FileSink(path, **kwargs) return self.add( sink, level=level, format=format, filter=filter, colorize=colorize, serialize=serialize, backtrace=backtrace, diagnose=diagnose, enqueue=enqueue, catch=catch, ) elif hasattr(sink, "write") and callable(sink.write): if colorize is False: stream = sink else: try: converter = AnsiToWin32(sink, convert=None, strip=False) isatty = converter.stream.isatty() except Exception: if colorize is None: colorize = False stream = sink else: if colorize is None: colorize = isatty if converter.should_wrap() and colorize: stream = converter.stream else: stream = sink stream_write = stream.write if kwargs: def write(m): return stream_write(m, **kwargs) else: write = stream_write if hasattr(stream, "flush") and callable(stream.flush): stream_flush = stream.flush def writer(m): write(m) stream_flush() else: writer = write if hasattr(stream, "stop") and callable(stream.stop): stopper = stream.stop else: def stopper(): return None elif isinstance(sink, logging.Handler): def writer(m): message = str(m) r = m.record exc = r["exception"] if not is_formatter_dynamic: message = message[:-1] record = logging.root.makeRecord( r["name"], r["level"].no, r["file"].path, r["line"], message, (), (exc.type, exc.value, exc.traceback) if exc else None, r["function"], r["extra"], **kwargs ) if exc: record.exc_text = "\n" sink.handle(record) stopper = sink.close if colorize is None: colorize = 
False elif callable(sink): if kwargs: def writer(m): return sink(m, **kwargs) else: writer = sink def stopper(): return None if colorize is None: colorize = False else: raise ValueError("Cannot log to objects of type '%s'." % type(sink).__name__) if filter is None or filter == "": filter_func = None elif isinstance(filter, str): parent = filter + "." length = len(parent) def filter_func(r): return (r["name"] + ".")[:length] == parent elif callable(filter): filter_func = filter else: raise ValueError( "Invalid filter, it should be a function or a string, not: '%s'" % type(filter).__name__ ) if isinstance(level, str): levelno = self.level(level).no elif isinstance(level, int): levelno = level else: raise ValueError( "Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__ ) if levelno < 0: raise ValueError( "Invalid level value, it should be a positive integer, not: %d" % levelno ) if isinstance(format, str): formatter = format + "\n{exception}" is_formatter_dynamic = False elif callable(format): formatter = format is_formatter_dynamic = True else: raise ValueError( "Invalid format, it should be a string or a function, not: '%s'" % type(format).__name__ ) try: encoding = sink.encoding except AttributeError: encoding = None if not encoding: encoding = "ascii" with self._lock: handler_id = next(self._handlers_count) colors = [lvl.color for lvl in self._levels.values()] + [""] exception_formatter = ExceptionFormatter( colorize=colorize, encoding=encoding, diagnose=diagnose, backtrace=backtrace, hidden_frames_filename=self.catch.__code__.co_filename, ) handler = Handler( writer=writer, stopper=stopper, levelno=levelno, formatter=formatter, is_formatter_dynamic=is_formatter_dynamic, filter_=filter_func, colorize=colorize, serialize=serialize, catch=catch, enqueue=enqueue, id_=handler_id, exception_formatter=exception_formatter, colors=colors, ) handlers = self._handlers.copy() handlers[handler_id] = handler self.__class__._min_level = min(self.__class__._min_level, levelno) self.__class__._handlers = handlers return handler_id
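The ``is_formatter_dynamic`` branch above corresponds to passing a function as ``format``; as noted in the docstring, such a function must append the line ending and exception field itself. A minimal sketch (the ``raw`` flag is a hypothetical marker bound via ``logger.bind(raw=True)``, not a built-in field):

>>> def formatter(record):
...     if record["extra"].get("raw"):  # hypothetical flag, set by the caller
...         return "{message}\n"
...     return "{time} | {level} | {message}\n{exception}"
...
>>> logger.add(sys.stderr, format=formatter)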
[ "def", "add", "(", "self", ",", "sink", ",", "*", ",", "level", "=", "_defaults", ".", "LOGURU_LEVEL", ",", "format", "=", "_defaults", ".", "LOGURU_FORMAT", ",", "filter", "=", "_defaults", ".", "LOGURU_FILTER", ",", "colorize", "=", "_defaults", ".", "...
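The string form of ``filter`` (implemented by ``filter_func`` in the code above) keeps a record only when its ``name`` is the given module or a dotted child of it; a callable receives the full record instead. A minimal sketch (module and message contents are arbitrary):

>>> logger.add(sys.stderr, filter="my_package")  # accepts "my_package" and "my_package.sub", rejects "my_package_2"
>>> logger.add(sys.stderr, filter=lambda record: "noisy" not in record["message"])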
r"""Add a handler sending log messages to a sink adequately configured. Parameters ---------- sink : |file-like object|_, |str|, |Path|, |function|_, |Handler| or |class|_ An object in charge of receiving formatted logging messages and propagating them to an appropriate endpoint. level : |int| or |str|, optional The minimum severity level from which logged messages should be sent to the sink. format : |str| or |function|_, optional The template used to format logged messages before being sent to the sink. filter : |function|_ or |str|, optional A directive used to optionally filter out logged messages before they are sent to the sink. colorize : |bool|, optional Whether or not the color markups contained in the formatted message should be converted to ansi codes for terminal coloration, or stripped otherwise. If ``None``, the choice is automatically made based on the sink being a tty or not. serialize : |bool|, optional Whether or not the logged message and its records should be first converted to a JSON string before being sent to the sink. backtrace : |bool|, optional Whether or not the exception trace formatted should be extended upward, beyond the catching point, to show the full stacktrace which generated the error. diagnose : |bool|, optional Whether or not the exception trace should display the variables values to eases the debugging. This should be set to ``False`` in production to avoid leaking sensitive data. enqueue : |bool|, optional Whether or not the messages to be logged should first pass through a multiprocess-safe queue before reaching the sink. This is useful while logging to a file through multiple processes. catch : |bool|, optional Whether or not errors occurring while sink handles logs messages should be caught or not. If ``True``, an exception message is displayed on |sys.stderr| but the exception is not propagated to the caller, preventing your app to crash. **kwargs Additional parameters that will be passed to the sink while creating it or while logging messages (the exact behavior depends on the sink type). If and only if the sink is a file, the following parameters apply: Parameters ---------- rotation : |str|, |int|, |time|, |timedelta| or |function|_, optional A condition indicating whenever the current logged file should be closed and a new one started. retention : |str|, |int|, |timedelta| or |function|_, optional A directive filtering old files that should be removed during rotation or end of program. compression : |str| or |function|_, optional A compression or archive format to which log files should be converted at closure. delay : |bool|, optional Whether or not the file should be created as soon as the sink is configured, or delayed until first logged message. It defaults to ``False``. mode : |str|, optional The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the file in appending mode). buffering : |int|, optional The buffering policy as for built-in |open| function. It defaults to ``1`` (line buffered file). encoding : |str|, optional The file encoding as for built-in |open| function. If ``None``, it defaults to ``locale.getpreferredencoding()``. **kwargs Others parameters are passed to the built-in |open| function. Returns ------- :class:`int` An identifier associated with the added sink and which should be used to |remove| it. Notes ----- Extended summary follows. .. _sink: .. rubric:: The sink parameter The ``sink`` handles incoming log messages and proceed to their writing somewhere and somehow. 
A sink can take many forms: - A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with a ``.write()`` method is considered as a file-like object. If it has a ``.flush()`` method, it will be automatically called after each logged message. If it has a ``.stop()`` method, it will be automatically called at sink termination. - A file path as |str| or |Path|. It can be parametrized with some additional parameters, see below. - A simple |function|_ like ``lambda msg: print(msg)``. This allows for logging procedure entirely defined by user preferences and needs. - A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records are automatically converted to the structure expected by the |logging| module. - A |class|_ object that will be used to instantiate the sink using ``**kwargs`` attributes passed. Hence the class should instantiate objects which are therefore valid sinks. Note that you should avoid using the ``logger`` inside any of your sinks as this would result in infinite recursion or dead lock if the module's sink was not explicitly disabled. .. _message: .. rubric:: The logged message The logged message passed to all added sinks is nothing more than a string of the formatted log, to which a special attribute is associated: the ``.record`` which is a dict containing all contextual information possibly needed (see below). Logged messages are formatted according to the ``format`` of the added sink. This format is usually a string containing braces fields to display attributes from the record dict. If fine-grained control is needed, the ``format`` can also be a function which takes the record as parameter and return the format template string. However, note that in such a case, you should take care of appending the line ending and exception field to the returned format, while ``"\n{exception}"`` is automatically appended for convenience if ``format`` is a string. The ``filter`` attribute can be used to control which messages are effectively passed to the sink and which one are ignored. A function can be used, accepting the record as an argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If a string is used, only the records with the same ``name`` and its children will be allowed. .. _levels: .. rubric:: The severity levels Each logged message is associated with a severity level. These levels make it possible to prioritize messages and to choose the verbosity of the logs according to usages. For example, it allows to display some debugging information to a developer, while hiding it to the end user running the application. The ``level`` attribute of every added sink controls the minimum threshold from which log messages are allowed to be emitted. While using the ``logger``, you are in charge of configuring the appropriate granularity of your logs. It is possible to add even more custom levels by using the |level| method. 
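As a minimal sketch of the class-as-sink form listed above (``DatabaseSink`` and ``save_to_db`` are hypothetical names, not part of Loguru):

>>> class DatabaseSink:
...     def __init__(self, dsn):
...         self.dsn = dsn
...     def write(self, message):
...         save_to_db(self.dsn, str(message), message.record)  # hypothetical persistence helper
...
>>> logger.add(DatabaseSink, level="ERROR", dsn="postgres://...")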
Here are the standard levels with their default severity value, each one
is associated with a logging method of the same name:

+--------------+----------------+-------------------+
| Level name   | Severity value | Logger method     |
+==============+================+===================+
| ``TRACE``    | 5              | |logger.trace|    |
+--------------+----------------+-------------------+
| ``DEBUG``    | 10             | |logger.debug|    |
+--------------+----------------+-------------------+
| ``INFO``     | 20             | |logger.info|     |
+--------------+----------------+-------------------+
| ``SUCCESS``  | 25             | |logger.success|  |
+--------------+----------------+-------------------+
| ``WARNING``  | 30             | |logger.warning|  |
+--------------+----------------+-------------------+
| ``ERROR``    | 40             | |logger.error|    |
+--------------+----------------+-------------------+
| ``CRITICAL`` | 50             | |logger.critical| |
+--------------+----------------+-------------------+

.. _record:

.. rubric:: The record dict

The record is just a Python dict, accessible from sinks by
``message.record``. It contains all contextual information of the
logging call (time, function, file, line, level, etc.).

Each of its keys can be used in the handler's ``format`` so the
corresponding value is properly displayed in the logged message
(e.g. ``"{level}"`` -> ``"INFO"``). Some of the record's values are
objects with two or more attributes; these can be formatted with
``"{key.attr}"`` (``"{key}"`` displays the default one). Formatting
directives like ``"{key: >3}"`` also work and are particularly useful
for time (see below).

+------------+---------------------------------+----------------------------+
| Key        | Description                     | Attributes                 |
+============+=================================+============================+
| elapsed    | The time elapsed since the      | See |timedelta|            |
|            | start of the program            |                            |
+------------+---------------------------------+----------------------------+
| exception  | The formatted exception if any, | ``type``, ``value``,       |
|            | ``None`` otherwise              | ``traceback``              |
+------------+---------------------------------+----------------------------+
| extra      | The dict of attributes          | None                       |
|            | bound by the user (see |bind|)  |                            |
+------------+---------------------------------+----------------------------+
| file       | The file where the logging call | ``name`` (default),        |
|            | was made                        | ``path``                   |
+------------+---------------------------------+----------------------------+
| function   | The function from which the     | None                       |
|            | logging call was made           |                            |
+------------+---------------------------------+----------------------------+
| level      | The severity used to log the    | ``name`` (default),        |
|            | message                         | ``no``, ``icon``           |
+------------+---------------------------------+----------------------------+
| line       | The line number in the source   | None                       |
|            | code                            |                            |
+------------+---------------------------------+----------------------------+
| message    | The logged message (not yet     | None                       |
|            | formatted)                      |                            |
+------------+---------------------------------+----------------------------+
| module     | The module where the logging    | None                       |
|            | call was made                   |                            |
+------------+---------------------------------+----------------------------+
| name       | The ``__name__`` where the      | None                       |
|            | logging call was made           |                            |
+------------+---------------------------------+----------------------------+
| process    | The process in which the        | ``name``, ``id`` (default) |
|            | logging call was made           |                            |
+------------+---------------------------------+----------------------------+
| thread     | The thread in which the         | ``name``, ``id`` (default) |
|            | logging call was made           |                            |
+------------+---------------------------------+----------------------------+
| time       | The aware local time when the   | See |datetime|             |
|            | logging call was made           |                            |
+------------+---------------------------------+----------------------------+

.. _time:

.. rubric:: The time formatting

To use your favorite time representation, you can set it directly in the
time formatter specifier of your handler format, like for example
``format="{time:HH:mm:ss} {message}"``. Note that this datetime
represents your local time, and it is also made timezone-aware, so you
can display the UTC offset to avoid ambiguities.

The time field can be formatted using more human-friendly tokens. Those
constitute a subset of the ones used by the `Pendulum`_ library by
`@sdispater`_. To escape a token, just add square brackets around it,
for example ``"[YY]"`` would display literally ``"YY"``.

If no time formatter specifier is used, like for example if
``format="{time} {message}"``, the default one uses ISO 8601.

+------------------------+---------+----------------------------------------+
|                        | Token   | Output                                 |
+========================+=========+========================================+
| Year                   | YYYY    | 2000, 2001, 2002 ... 2012, 2013        |
|                        +---------+----------------------------------------+
|                        | YY      | 00, 01, 02 ... 12, 13                  |
+------------------------+---------+----------------------------------------+
| Quarter                | Q       | 1 2 3 4                                |
+------------------------+---------+----------------------------------------+
| Month                  | MMMM    | January, February, March ...           |
|                        +---------+----------------------------------------+
|                        | MMM     | Jan, Feb, Mar ...                      |
|                        +---------+----------------------------------------+
|                        | MM      | 01, 02, 03 ... 11, 12                  |
|                        +---------+----------------------------------------+
|                        | M       | 1, 2, 3 ... 11, 12                     |
+------------------------+---------+----------------------------------------+
| Day of Year            | DDDD    | 001, 002, 003 ... 364, 365             |
|                        +---------+----------------------------------------+
|                        | DDD     | 1, 2, 3 ... 364, 365                   |
+------------------------+---------+----------------------------------------+
| Day of Month           | DD      | 01, 02, 03 ... 30, 31                  |
|                        +---------+----------------------------------------+
|                        | D       | 1, 2, 3 ... 30, 31                     |
+------------------------+---------+----------------------------------------+
| Day of Week            | dddd    | Monday, Tuesday, Wednesday ...         |
|                        +---------+----------------------------------------+
|                        | ddd     | Mon, Tue, Wed ...                      |
|                        +---------+----------------------------------------+
|                        | d       | 0, 1, 2 ... 6                          |
+------------------------+---------+----------------------------------------+
| Day of ISO Week        | E       | 1, 2, 3 ... 7                          |
+------------------------+---------+----------------------------------------+
| Hour                   | HH      | 00, 01, 02 ... 22, 23                  |
|                        +---------+----------------------------------------+
|                        | H       | 0, 1, 2 ... 22, 23                     |
|                        +---------+----------------------------------------+
|                        | hh      | 01, 02, 03 ... 11, 12                  |
|                        +---------+----------------------------------------+
|                        | h       | 1, 2, 3 ... 11, 12                     |
+------------------------+---------+----------------------------------------+
| Minute                 | mm      | 00, 01, 02 ... 58, 59                  |
|                        +---------+----------------------------------------+
|                        | m       | 0, 1, 2 ... 58, 59                     |
+------------------------+---------+----------------------------------------+
| Second                 | ss      | 00, 01, 02 ... 58, 59                  |
|                        +---------+----------------------------------------+
|                        | s       | 0, 1, 2 ... 58, 59                     |
+------------------------+---------+----------------------------------------+
| Fractional Second      | S       | 0 1 ... 8 9                            |
|                        +---------+----------------------------------------+
|                        | SS      | 00, 01, 02 ... 98, 99                  |
|                        +---------+----------------------------------------+
|                        | SSS     | 000 001 ... 998 999                    |
|                        +---------+----------------------------------------+
|                        | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
|                        +---------+----------------------------------------+
|                        | SSSSSS  | 000000 000001 ... 999998 999999        |
+------------------------+---------+----------------------------------------+
| AM / PM                | A       | AM, PM                                 |
+------------------------+---------+----------------------------------------+
| Timezone               | Z       | -07:00, -06:00 ... +06:00, +07:00      |
|                        +---------+----------------------------------------+
|                        | ZZ      | -0700, -0600 ... +0600, +0700          |
|                        +---------+----------------------------------------+
|                        | zz      | EST CST ... MST PST                    |
+------------------------+---------+----------------------------------------+
| Seconds timestamp      | X       | 1381685817, 1234567890.123             |
+------------------------+---------+----------------------------------------+
| Microseconds timestamp | x       | 1234567890123                          |
+------------------------+---------+----------------------------------------+

.. _file:

.. rubric:: The file sinks

If the sink is a |str| or a |Path|, the corresponding file will be opened
for writing logs. The path can also contain a special ``"{time}"`` field
that will be formatted with the current date at file creation.

The ``rotation`` check is made before logging each message. If there is
already an existing file with the same name as the file to be created,
then the existing file is renamed by appending the date to its basename
to prevent file overwriting. This parameter accepts:

- an |int| which corresponds to the maximum file size in bytes before
  the current logged file is closed and a new one is started.
- a |timedelta| which indicates the frequency of each new rotation.
- a |time| which specifies the hour when the daily rotation should occur.
- a |str| for human-friendly parametrization of one of the previously
  enumerated types. Examples: ``"100 MB"``, ``"0.5 GB"``,
  ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``, ``"monthly"``,
  ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
- a |function|_ which will be called before logging. It should accept
  two arguments: the logged message and the file object, and it should
  return ``True`` if the rotation should happen now, ``False`` otherwise.

The ``retention`` occurs at rotation or at sink stop if rotation is
``None``. Files are selected according to their basename, if it is the
same as the sink file's, with any time field replaced by ``.*``. This
parameter accepts:

- an |int| which indicates the number of log files to keep, while older
  files are removed.
- a |timedelta| which specifies the maximum age of files to keep.
- a |str| for human-friendly parametrization of the maximum age of files
  to keep. Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
- a |function|_ which will be called before the retention process. It
  should accept the list of log files as argument and is free to do
  whatever it wants with them (moving files, removing them, etc.).

The ``compression`` happens at rotation or at sink stop if rotation is
``None``. This parameter accepts:

- a |str| which corresponds to the compressed or archived file
  extension. This can be one of: ``"gz"``, ``"bz2"``, ``"xz"``,
  ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``, ``"tar.xz"``,
  ``"zip"``.
- a |function|_ which will be called before file termination. It should
  accept the path of the log file as argument and is free to do whatever
  it wants with it (custom compression, network sending, removing it,
  etc.).

.. _color:

.. rubric:: The color markups

To add colors to your logs, you just have to enclose your format string
with the appropriate tags. This is based on the great `ansimarkup`_
library from `@gvalkov`_. Those tags are removed if the sink doesn't
support ANSI codes. The special tag ``<level>`` (abbreviated with
``<lvl>``) is transformed according to the configured color of the
logged message level.

Here are the available tags (note that compatibility may vary depending
on the terminal):

+-----------------+-----------------+
| Color (abbr)    | Styles (abbr)   |
+=================+=================+
| Black (k)       | Bold (b)        |
+-----------------+-----------------+
| Blue (e)        | Dim (d)         |
+-----------------+-----------------+
| Cyan (c)        | Normal (n)      |
+-----------------+-----------------+
| Green (g)       | Italic (i)      |
+-----------------+-----------------+
| Magenta (m)     | Underline (u)   |
+-----------------+-----------------+
| Red (r)         | Strike (s)      |
+-----------------+-----------------+
| White (w)       | Reverse (r)     |
+-----------------+-----------------+
| Yellow (y)      | Blink (l)       |
+-----------------+-----------------+
|                 | Hide (h)        |
+-----------------+-----------------+

Usage:

+-----------------+-------------------------------------------------------------------+
| Description     | Examples                                                          |
|                 +---------------------------------+---------------------------------+
|                 | Foreground                      | Background                      |
+=================+=================================+=================================+
| Basic colors    | ``<red>``, ``<r>``              | ``<GREEN>``, ``<G>``            |
+-----------------+---------------------------------+---------------------------------+
| Light colors    | ``<light-blue>``, ``<le>``      | ``<LIGHT-CYAN>``, ``<LC>``      |
+-----------------+---------------------------------+---------------------------------+
| Xterm colors    | ``<fg 86>``, ``<fg 255>``       | ``<bg 42>``, ``<bg 9>``         |
+-----------------+---------------------------------+---------------------------------+
| Hex colors      | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
+-----------------+---------------------------------+---------------------------------+
| RGB colors      | ``<fg 0,95,0>``                 | ``<bg 72,119,65>``              |
+-----------------+---------------------------------+---------------------------------+
| Stylizing       | ``<bold>``, ``<b>``, ``<underline>``, ``<u>``                     |
+-----------------+-------------------------------------------------------------------+
| Shorthand       | ``<red, yellow>``, ``<r, y>``                                     |
| (FG, BG)        |                                                                   |
+-----------------+-------------------------------------------------------------------+
| Shorthand       | ``<bold, cyan, white>``, ``<b,,w>``, ``<b,c,>``                   |
| (Style, FG, BG) |                                                                   |
+-----------------+-------------------------------------------------------------------+

.. _env:

.. rubric:: The environment variables

The default values of sink parameters can be entirely customized. This is
particularly useful if you don't like the log format of the
pre-configured sink.

Each of the |add| default parameters can be modified by setting the
``LOGURU_[PARAM]`` environment variable. For example on Linux:
``export LOGURU_FORMAT="{time} - {message}"`` or
``export LOGURU_DIAGNOSE=NO``.

The default levels' attributes can also be modified by setting the
``LOGURU_[LEVEL]_[ATTR]`` environment variable. For example, on Windows:
``setx LOGURU_DEBUG_COLOR "<blue>"`` or ``setx LOGURU_TRACE_ICON "🚀"``.

If you want to disable the pre-configured sink, you can set the
``LOGURU_AUTOINIT`` variable to ``False``.

On Linux, you will probably need to edit the ``~/.profile`` file to make
this persistent. On Windows, don't forget to restart your terminal for
the change to be taken into account.

Examples
--------
>>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")

>>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")

>>> def my_sink(message):
...     record = message.record
...     update_db(message, time=record["time"], level=record["level"])
...
>>> logger.add(my_sink)

>>> from logging import StreamHandler
>>> logger.add(StreamHandler(sys.stderr), format="{message}")

>>> class RandomStream:
...     def __init__(self, seed, threshold):
...         self.threshold = threshold
...         random.seed(seed)
...     def write(self, message):
...         if random.random() > self.threshold:
...             print(message)
...
>>> stream_object = RandomStream(seed=12345, threshold=0.25)
>>> logger.add(stream_object, level="INFO")
>>> logger.add(RandomStream, level="DEBUG", seed=34567, threshold=0.5)
[ "r", "Add", "a", "handler", "sending", "log", "messages", "to", "a", "sink", "adequately", "configured", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L166-L843
train
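The sink options documented in the record above combine naturally. Here is a minimal sketch, assuming the loguru API as documented at this revision; the paths, the format string, and the ``new_session`` marker are illustrative names, not part of loguru:

from loguru import logger

# A file sink combining the human-friendly string forms of the rotation,
# retention and compression parameters described above.
logger.add(
    "app_{time}.log",    # the "{time}" field is filled at file creation
    level="INFO",
    format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {message}",
    rotation="100 MB",   # start a new file once the current one reaches 100 MB
    retention="10 days", # remove rotated files older than 10 days
    compression="zip",   # archive each rotated file
)

# Rotation can also be a callable receiving (message, file) and returning a bool.
def rotate_on_marker(message, file):
    # Rotate whenever a record was bound with a (hypothetical) "new_session" flag;
    # the record is the dict described in the table above.
    return message.record["extra"].get("new_session", False)

logger.add("session.log", rotation=rotate_on_marker)
logger.bind(new_session=True).info("--- new session ---")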
Delgan/loguru
loguru/_logger.py
Logger.remove
def remove(self, handler_id=None): """Remove a previously added handler and stop sending logs to its sink. Parameters ---------- handler_id : |int| or ``None`` The id of the sink to remove, as it was returned by the |add| method. If ``None``, all handlers are removed. The pre-configured handler is guaranteed to have the index ``0``. Raises ------ ValueError If ``handler_id`` is not ``None`` but there is no active handler with such id. Examples -------- >>> i = logger.add(sys.stderr, format="{message}") >>> logger.info("Logging") Logging >>> logger.remove(i) >>> logger.info("No longer logging") """ with self._lock: handlers = self._handlers.copy() if handler_id is None: for handler in handlers.values(): handler.stop() handlers.clear() else: try: handler = handlers.pop(handler_id) except KeyError: raise ValueError("There is no existing handler with id '%s'" % handler_id) handler.stop() levelnos = (h.levelno for h in handlers.values()) self.__class__._min_level = min(levelnos, default=float("inf")) self.__class__._handlers = handlers
python
def remove(self, handler_id=None): """Remove a previously added handler and stop sending logs to its sink. Parameters ---------- handler_id : |int| or ``None`` The id of the sink to remove, as it was returned by the |add| method. If ``None``, all handlers are removed. The pre-configured handler is guaranteed to have the index ``0``. Raises ------ ValueError If ``handler_id`` is not ``None`` but there is no active handler with such id. Examples -------- >>> i = logger.add(sys.stderr, format="{message}") >>> logger.info("Logging") Logging >>> logger.remove(i) >>> logger.info("No longer logging") """ with self._lock: handlers = self._handlers.copy() if handler_id is None: for handler in handlers.values(): handler.stop() handlers.clear() else: try: handler = handlers.pop(handler_id) except KeyError: raise ValueError("There is no existing handler with id '%s'" % handler_id) handler.stop() levelnos = (h.levelno for h in handlers.values()) self.__class__._min_level = min(levelnos, default=float("inf")) self.__class__._handlers = handlers
[ "def", "remove", "(", "self", ",", "handler_id", "=", "None", ")", ":", "with", "self", ".", "_lock", ":", "handlers", "=", "self", ".", "_handlers", ".", "copy", "(", ")", "if", "handler_id", "is", "None", ":", "for", "handler", "in", "handlers", "....
Remove a previously added handler and stop sending logs to its sink.

Parameters
----------
handler_id : |int| or ``None``
    The id of the sink to remove, as it was returned by the |add|
    method. If ``None``, all handlers are removed. The pre-configured
    handler is guaranteed to have the index ``0``.

Raises
------
ValueError
    If ``handler_id`` is not ``None`` but there is no active handler
    with such an id.

Examples
--------
>>> i = logger.add(sys.stderr, format="{message}")
>>> logger.info("Logging")
Logging
>>> logger.remove(i)
>>> logger.info("No longer logging")
[ "Remove", "a", "previously", "added", "handler", "and", "stop", "sending", "logs", "to", "its", "sink", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L845-L883
train
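A short round trip showing how the handler id returned by ``add()`` drives ``remove()``; a sketch assuming the API documented in this record:

import sys
from loguru import logger

# add() returns an id; remove(id) stops that sink only.
handler_id = logger.add(sys.stderr, format="{level} | {message}")
logger.info("this line reaches stderr")

logger.remove(handler_id)  # an unknown id raises ValueError
logger.remove()            # no argument: drop every remaining handler,
                           # including the pre-configured one (id 0)
logger.info("this line goes nowhere")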
Delgan/loguru
loguru/_logger.py
Logger.catch
def catch( self, exception=Exception, *, level="ERROR", reraise=False, message="An error has been caught in function '{record[function]}', " "process '{record[process].name}' ({record[process].id}), " "thread '{record[thread].name}' ({record[thread].id}):" ): """Return a decorator to automatically log possibly caught error in wrapped function. This is useful to ensure unexpected exceptions are logged, the entire program can be wrapped by this method. This is also very useful to decorate |Thread.run| methods while using threads to propagate errors to the main logger thread. Note that the visibility of variables values (which uses the cool `better_exceptions`_ library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink. The returned object can also be used as a context manager. Parameters ---------- exception : |Exception|, optional The type of exception to intercept. If several types should be caught, a tuple of exceptions can be used too. level : |str| or |int|, optional The level name or severity with which the message should be logged. reraise : |bool|, optional Whether or not the exception should be raised again and hence propagated to the caller. message : |str|, optional The message that will be automatically logged if an exception occurs. Note that it will be formatted with the ``record`` attribute. Returns ------- decorator / context manager An object that can be used to decorate a function or as a context manager to log exceptions possibly caught. Examples -------- >>> @logger.catch ... def f(x): ... 100 / x ... >>> def g(): ... f(10) ... f(0) ... >>> g() ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398): Traceback (most recent call last): File "program.py", line 12, in <module> g() └ <function g at 0x7f225fe2bc80> > File "program.py", line 10, in g f(0) └ <function f at 0x7f225fe2b9d8> File "program.py", line 6, in f 100 / x └ 0 ZeroDivisionError: division by zero >>> with logger.catch(message="Because we never know..."): ... main() # No exception, no logs ... """ if callable(exception) and ( not isclass(exception) or not issubclass(exception, BaseException) ): return self.catch()(exception) class Catcher: def __init__(self, as_decorator): self._as_decorator = as_decorator def __enter__(self_): return None def __exit__(self_, type_, value, traceback_): if type_ is None: return if not issubclass(type_, exception): return False if self_._as_decorator: back = 2 else: back = 1 logger_ = self.opt( exception=True, record=True, lazy=self._lazy, ansi=self._ansi, raw=self._raw, depth=self._depth + back, ) log = logger_._make_log_function(level) log(logger_, message) return not reraise def __call__(_, function): catcher = Catcher(True) if inspect.iscoroutinefunction(function): async def catch_wrapper(*args, **kwargs): with catcher: return await function(*args, **kwargs) elif inspect.isgeneratorfunction(function): def catch_wrapper(*args, **kwargs): with catcher: return (yield from function(*args, **kwargs)) else: def catch_wrapper(*args, **kwargs): with catcher: return function(*args, **kwargs) functools.update_wrapper(catch_wrapper, function) return catch_wrapper return Catcher(False)
python
def catch( self, exception=Exception, *, level="ERROR", reraise=False, message="An error has been caught in function '{record[function]}', " "process '{record[process].name}' ({record[process].id}), " "thread '{record[thread].name}' ({record[thread].id}):" ): """Return a decorator to automatically log possibly caught error in wrapped function. This is useful to ensure unexpected exceptions are logged, the entire program can be wrapped by this method. This is also very useful to decorate |Thread.run| methods while using threads to propagate errors to the main logger thread. Note that the visibility of variables values (which uses the cool `better_exceptions`_ library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink. The returned object can also be used as a context manager. Parameters ---------- exception : |Exception|, optional The type of exception to intercept. If several types should be caught, a tuple of exceptions can be used too. level : |str| or |int|, optional The level name or severity with which the message should be logged. reraise : |bool|, optional Whether or not the exception should be raised again and hence propagated to the caller. message : |str|, optional The message that will be automatically logged if an exception occurs. Note that it will be formatted with the ``record`` attribute. Returns ------- decorator / context manager An object that can be used to decorate a function or as a context manager to log exceptions possibly caught. Examples -------- >>> @logger.catch ... def f(x): ... 100 / x ... >>> def g(): ... f(10) ... f(0) ... >>> g() ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398): Traceback (most recent call last): File "program.py", line 12, in <module> g() └ <function g at 0x7f225fe2bc80> > File "program.py", line 10, in g f(0) └ <function f at 0x7f225fe2b9d8> File "program.py", line 6, in f 100 / x └ 0 ZeroDivisionError: division by zero >>> with logger.catch(message="Because we never know..."): ... main() # No exception, no logs ... """ if callable(exception) and ( not isclass(exception) or not issubclass(exception, BaseException) ): return self.catch()(exception) class Catcher: def __init__(self, as_decorator): self._as_decorator = as_decorator def __enter__(self_): return None def __exit__(self_, type_, value, traceback_): if type_ is None: return if not issubclass(type_, exception): return False if self_._as_decorator: back = 2 else: back = 1 logger_ = self.opt( exception=True, record=True, lazy=self._lazy, ansi=self._ansi, raw=self._raw, depth=self._depth + back, ) log = logger_._make_log_function(level) log(logger_, message) return not reraise def __call__(_, function): catcher = Catcher(True) if inspect.iscoroutinefunction(function): async def catch_wrapper(*args, **kwargs): with catcher: return await function(*args, **kwargs) elif inspect.isgeneratorfunction(function): def catch_wrapper(*args, **kwargs): with catcher: return (yield from function(*args, **kwargs)) else: def catch_wrapper(*args, **kwargs): with catcher: return function(*args, **kwargs) functools.update_wrapper(catch_wrapper, function) return catch_wrapper return Catcher(False)
[ "def", "catch", "(", "self", ",", "exception", "=", "Exception", ",", "*", ",", "level", "=", "\"ERROR\"", ",", "reraise", "=", "False", ",", "message", "=", "\"An error has been caught in function '{record[function]}', \"", "\"process '{record[process].name}' ({record[pr...
Return a decorator to automatically log errors possibly caught in the
wrapped function.

This is useful to ensure unexpected exceptions are logged; the entire
program can be wrapped by this method. This is also very useful to
decorate |Thread.run| methods while using threads to propagate errors
to the main logger thread.

Note that the visibility of variable values (which uses the cool
`better_exceptions`_ library from `@Qix-`_) depends on the ``diagnose``
option of each configured sink.

The returned object can also be used as a context manager.

Parameters
----------
exception : |Exception|, optional
    The type of exception to intercept. If several types should be
    caught, a tuple of exceptions can be used too.
level : |str| or |int|, optional
    The level name or severity with which the message should be logged.
reraise : |bool|, optional
    Whether or not the exception should be raised again and hence
    propagated to the caller.
message : |str|, optional
    The message that will be automatically logged if an exception
    occurs. Note that it will be formatted with the ``record``
    attribute.

Returns
-------
decorator / context manager
    An object that can be used to decorate a function or as a context
    manager to log possibly caught exceptions.

Examples
--------
>>> @logger.catch
... def f(x):
...     100 / x
...
>>> def g():
...     f(10)
...     f(0)
...
>>> g()
ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
Traceback (most recent call last):
  File "program.py", line 12, in <module>
    g()
    └ <function g at 0x7f225fe2bc80>
> File "program.py", line 10, in g
    f(0)
    └ <function f at 0x7f225fe2b9d8>
  File "program.py", line 6, in f
    100 / x
          └ 0
ZeroDivisionError: division by zero

>>> with logger.catch(message="Because we never know..."):
...     main()  # No exception, no logs
...
[ "Return", "a", "decorator", "to", "automatically", "log", "possibly", "caught", "error", "in", "wrapped", "function", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L885-L1011
train
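A sketch of the two usages of ``catch()`` described above, decorator and context manager; the function and messages are invented for illustration:

from loguru import logger

# As a decorator: the traceback is logged at the given level and, since
# reraise defaults to False, the exception is swallowed (the call returns None).
@logger.catch(ZeroDivisionError, level="WARNING")
def risky(x):
    return 100 / x

risky(0)

# As a context manager around any block of code; the custom message is
# formatted with the record before the traceback.
with logger.catch(message="Unexpected failure during setup"):
    1 / 0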
Delgan/loguru
loguru/_logger.py
Logger.opt
def opt(self, *, exception=None, record=False, lazy=False, ansi=False, raw=False, depth=0): r"""Parametrize a logging call to slightly change generated log message. Parameters ---------- exception : |bool|, |tuple| or |Exception|, optional If it does not evaluate as ``False``, the passed exception is formatted and added to the log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple, otherwise the exception information is retrieved from |sys.exc_info|. record : |bool|, optional If ``True``, the record dict contextualizing the logging call can be used to format the message by using ``{record[key]}`` in the log message. lazy : |bool|, optional If ``True``, the logging call attribute to format the message should be functions which will be called only if the level is high enough. This can be used to avoid expensive functions if not necessary. ansi : |bool|, optional If ``True``, logged message will be colorized according to the markups it possibly contains. raw : |bool|, optional If ``True``, the formatting of each sink will be bypassed and the message will be send as is. depth : |int|, optional Specify which stacktrace should be used to contextualize the logged message. This is useful while using the logger from inside a wrapped function to retrieve worthwhile information. Returns ------- :class:`~Logger` A logger wrapping the core logger, but transforming logged message adequately before sending. Examples -------- >>> try: ... 1 / 0 ... except ZeroDivisionError: ... logger.opt(exception=True).debug("Exception logged with debug level:") ... [18:10:02] DEBUG in '<module>' - Exception logged with debug level: Traceback (most recent call last, catch point marked): > File "<stdin>", line 2, in <module> ZeroDivisionError: division by zero >>> logger.opt(record=True).info("Current line is: {record[line]}") [18:10:33] INFO in '<module>' - Current line is: 1 >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5)) [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000 >>> logger.opt(ansi=True).warning("We got a <red>BIG</red> problem") [18:11:30] WARNING in '<module>' - We got a BIG problem >>> logger.opt(raw=True).debug("No formatting\n") No formatting >>> def wrapped(): ... logger.opt(depth=1).info("Get parent context") ... >>> def func(): ... wrapped() ... >>> func() [18:11:54] DEBUG in 'func' - Get parent context """ return Logger(self._extra, exception, record, lazy, ansi, raw, depth)
python
def opt(self, *, exception=None, record=False, lazy=False, ansi=False, raw=False, depth=0): r"""Parametrize a logging call to slightly change generated log message. Parameters ---------- exception : |bool|, |tuple| or |Exception|, optional If it does not evaluate as ``False``, the passed exception is formatted and added to the log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple, otherwise the exception information is retrieved from |sys.exc_info|. record : |bool|, optional If ``True``, the record dict contextualizing the logging call can be used to format the message by using ``{record[key]}`` in the log message. lazy : |bool|, optional If ``True``, the logging call attribute to format the message should be functions which will be called only if the level is high enough. This can be used to avoid expensive functions if not necessary. ansi : |bool|, optional If ``True``, logged message will be colorized according to the markups it possibly contains. raw : |bool|, optional If ``True``, the formatting of each sink will be bypassed and the message will be send as is. depth : |int|, optional Specify which stacktrace should be used to contextualize the logged message. This is useful while using the logger from inside a wrapped function to retrieve worthwhile information. Returns ------- :class:`~Logger` A logger wrapping the core logger, but transforming logged message adequately before sending. Examples -------- >>> try: ... 1 / 0 ... except ZeroDivisionError: ... logger.opt(exception=True).debug("Exception logged with debug level:") ... [18:10:02] DEBUG in '<module>' - Exception logged with debug level: Traceback (most recent call last, catch point marked): > File "<stdin>", line 2, in <module> ZeroDivisionError: division by zero >>> logger.opt(record=True).info("Current line is: {record[line]}") [18:10:33] INFO in '<module>' - Current line is: 1 >>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5)) [18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000 >>> logger.opt(ansi=True).warning("We got a <red>BIG</red> problem") [18:11:30] WARNING in '<module>' - We got a BIG problem >>> logger.opt(raw=True).debug("No formatting\n") No formatting >>> def wrapped(): ... logger.opt(depth=1).info("Get parent context") ... >>> def func(): ... wrapped() ... >>> func() [18:11:54] DEBUG in 'func' - Get parent context """ return Logger(self._extra, exception, record, lazy, ansi, raw, depth)
[ "def", "opt", "(", "self", ",", "*", ",", "exception", "=", "None", ",", "record", "=", "False", ",", "lazy", "=", "False", ",", "ansi", "=", "False", ",", "raw", "=", "False", ",", "depth", "=", "0", ")", ":", "return", "Logger", "(", "self", ...
r"""Parametrize a logging call to slightly change the generated log message.

Parameters
----------
exception : |bool|, |tuple| or |Exception|, optional
    If it does not evaluate as ``False``, the passed exception is
    formatted and added to the log message. It could be an |Exception|
    object or a ``(type, value, traceback)`` tuple, otherwise the
    exception information is retrieved from |sys.exc_info|.
record : |bool|, optional
    If ``True``, the record dict contextualizing the logging call can
    be used to format the message by using ``{record[key]}`` in the log
    message.
lazy : |bool|, optional
    If ``True``, the logging call attributes used to format the message
    should be functions, which will be called only if the level is high
    enough. This can be used to avoid expensive functions if not
    necessary.
ansi : |bool|, optional
    If ``True``, the logged message will be colorized according to the
    markups it possibly contains.
raw : |bool|, optional
    If ``True``, the formatting of each sink will be bypassed and the
    message will be sent as is.
depth : |int|, optional
    Specify which stacktrace should be used to contextualize the logged
    message. This is useful while using the logger from inside a
    wrapped function to retrieve worthwhile information.

Returns
-------
:class:`~Logger`
    A logger wrapping the core logger, but transforming logged messages
    adequately before sending.

Examples
--------
>>> try:
...     1 / 0
... except ZeroDivisionError:
...     logger.opt(exception=True).debug("Exception logged with debug level:")
...
[18:10:02] DEBUG in '<module>' - Exception logged with debug level:
Traceback (most recent call last, catch point marked):
> File "<stdin>", line 2, in <module>
ZeroDivisionError: division by zero

>>> logger.opt(record=True).info("Current line is: {record[line]}")
[18:10:33] INFO in '<module>' - Current line is: 1

>>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
[18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000

>>> logger.opt(ansi=True).warning("We got a <red>BIG</red> problem")
[18:11:30] WARNING in '<module>' - We got a BIG problem

>>> logger.opt(raw=True).debug("No formatting\n")
No formatting

>>> def wrapped():
...     logger.opt(depth=1).info("Get parent context")
...
>>> def func():
...     wrapped()
...
>>> func()
[18:11:54] INFO in 'func' - Get parent context
[ "r", "Parametrize", "a", "logging", "call", "to", "slightly", "change", "generated", "log", "message", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1013-L1079
train
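A sketch of the per-call options of ``opt()`` shown above, assuming this revision's parameter names (``ansi`` is spelled here as documented, even though later loguru versions renamed it):

import math
from loguru import logger

# lazy=True: the kwarg is a function, only called if some sink accepts DEBUG.
logger.opt(lazy=True).debug("factorial={n}", n=lambda: math.factorial(2 ** 5))

# ansi=True: markup tags in the message are rendered as colors.
logger.opt(ansi=True).warning("disk usage is <red>critical</red>")

# depth=1: attribute the record to the caller of this helper, not the helper.
def log_for_caller(message):
    logger.opt(depth=1).info(message)

log_for_caller("reported with the caller's function/line context")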
Delgan/loguru
loguru/_logger.py
Logger.bind
def bind(_self, **kwargs): """Bind attributes to the ``extra`` dict of each logged message record. This is used to add custom context to each logging call. Parameters ---------- **kwargs Mapping between keys and values that will be added to the ``extra`` dict. Returns ------- :class:`~Logger` A logger wrapping the core logger, but which sends record with the customized ``extra`` dict. Examples -------- >>> logger.add(sys.stderr, format="{extra[ip]} - {message}") 1 >>> class Server: ... def __init__(self, ip): ... self.ip = ip ... self.logger = logger.bind(ip=ip) ... def call(self, message): ... self.logger.info(message) ... >>> instance_1 = Server("192.168.0.200") >>> instance_2 = Server("127.0.0.1") >>> instance_1.call("First instance") 192.168.0.200 - First instance >>> instance_2.call("Second instance") 127.0.0.1 - Second instance """ return Logger( {**_self._extra, **kwargs}, _self._exception, _self._record, _self._lazy, _self._ansi, _self._raw, _self._depth, )
python
def bind(_self, **kwargs): """Bind attributes to the ``extra`` dict of each logged message record. This is used to add custom context to each logging call. Parameters ---------- **kwargs Mapping between keys and values that will be added to the ``extra`` dict. Returns ------- :class:`~Logger` A logger wrapping the core logger, but which sends record with the customized ``extra`` dict. Examples -------- >>> logger.add(sys.stderr, format="{extra[ip]} - {message}") 1 >>> class Server: ... def __init__(self, ip): ... self.ip = ip ... self.logger = logger.bind(ip=ip) ... def call(self, message): ... self.logger.info(message) ... >>> instance_1 = Server("192.168.0.200") >>> instance_2 = Server("127.0.0.1") >>> instance_1.call("First instance") 192.168.0.200 - First instance >>> instance_2.call("Second instance") 127.0.0.1 - Second instance """ return Logger( {**_self._extra, **kwargs}, _self._exception, _self._record, _self._lazy, _self._ansi, _self._raw, _self._depth, )
[ "def", "bind", "(", "_self", ",", "*", "*", "kwargs", ")", ":", "return", "Logger", "(", "{", "*", "*", "_self", ".", "_extra", ",", "*", "*", "kwargs", "}", ",", "_self", ".", "_exception", ",", "_self", ".", "_record", ",", "_self", ".", "_lazy...
Bind attributes to the ``extra`` dict of each logged message record.

This is used to add custom context to each logging call.

Parameters
----------
**kwargs
    Mapping between keys and values that will be added to the ``extra``
    dict.

Returns
-------
:class:`~Logger`
    A logger wrapping the core logger, but which sends records with the
    customized ``extra`` dict.

Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
1
>>> class Server:
...     def __init__(self, ip):
...         self.ip = ip
...         self.logger = logger.bind(ip=ip)
...     def call(self, message):
...         self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
[ "Bind", "attributes", "to", "the", "extra", "dict", "of", "each", "logged", "message", "record", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1081-L1123
train
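``bind()`` pairs naturally with a handler ``format`` that reads ``{extra[...]}`` keys; a sketch where the ``request_id`` key and the ``requests.log`` path are illustrative, and a callable filter keeps records lacking the key out of this sink:

from loguru import logger

# Only records carrying a "request_id" extra reach this sink.
logger.add(
    "requests.log",
    format="{time} | {extra[request_id]} | {message}",
    filter=lambda record: "request_id" in record["extra"],
)

def handle_request(request_id):
    req_logger = logger.bind(request_id=request_id)  # child logger with context
    req_logger.info("request received")
    req_logger.success("request completed")

handle_request("a1b2c3")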
Delgan/loguru
loguru/_logger.py
Logger.level
def level(self, name, no=None, color=None, icon=None): """Add, update or retrieve a logging level. Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color`` and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom level, you should necessarily use its name, the severity number is not linked back to levels name (this implies that several levels can share the same severity). To add a new level, all parameters should be passed so it can be properly configured. To update an existing level, pass its ``name`` with the parameters to be changed. To retrieve level information, the ``name`` solely suffices. Parameters ---------- name : |str| The name of the logging level. no : |int| The severity of the level to be added or updated. color : |str| The color markup of the level to be added or updated. icon : |str| The icon of the level to be added or updated. Returns ------- ``Level`` A namedtuple containing information about the level. Examples -------- >>> level = logger.level("ERROR") Level(no=40, color='<red><bold>', icon='❌') >>> logger.add(sys.stderr, format="{level.no} {icon} {message}") >>> logger.level("CUSTOM", no=15, color="<blue>", icon="@") >>> logger.log("CUSTOM", "Logging...") 15 @ Logging... >>> logger.level("WARNING", icon=r"/!\\") >>> logger.warning("Updated!") 30 /!\\ Updated! """ if not isinstance(name, str): raise ValueError( "Invalid level name, it should be a string, not: '%s'" % type(name).__name__ ) if no is color is icon is None: try: return self._levels[name] except KeyError: raise ValueError("Level '%s' does not exist" % name) if name not in self._levels: if no is None: raise ValueError( "Level '%s' does not exist, you have to create it by specifying a level no" % name ) else: old_no, old_color, old_icon = None, "", " " else: old_no, old_color, old_icon = self.level(name) if no is None: no = old_no if color is None: color = old_color if icon is None: icon = old_icon if not isinstance(no, int): raise ValueError( "Invalid level no, it should be an integer, not: '%s'" % type(no).__name__ ) if no < 0: raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no) self._levels[name] = Level(no, color, icon) with self._lock: for handler in self._handlers.values(): handler.update_format(color) return self.level(name)
python
def level(self, name, no=None, color=None, icon=None): """Add, update or retrieve a logging level. Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color`` and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom level, you should necessarily use its name, the severity number is not linked back to levels name (this implies that several levels can share the same severity). To add a new level, all parameters should be passed so it can be properly configured. To update an existing level, pass its ``name`` with the parameters to be changed. To retrieve level information, the ``name`` solely suffices. Parameters ---------- name : |str| The name of the logging level. no : |int| The severity of the level to be added or updated. color : |str| The color markup of the level to be added or updated. icon : |str| The icon of the level to be added or updated. Returns ------- ``Level`` A namedtuple containing information about the level. Examples -------- >>> level = logger.level("ERROR") Level(no=40, color='<red><bold>', icon='❌') >>> logger.add(sys.stderr, format="{level.no} {icon} {message}") >>> logger.level("CUSTOM", no=15, color="<blue>", icon="@") >>> logger.log("CUSTOM", "Logging...") 15 @ Logging... >>> logger.level("WARNING", icon=r"/!\\") >>> logger.warning("Updated!") 30 /!\\ Updated! """ if not isinstance(name, str): raise ValueError( "Invalid level name, it should be a string, not: '%s'" % type(name).__name__ ) if no is color is icon is None: try: return self._levels[name] except KeyError: raise ValueError("Level '%s' does not exist" % name) if name not in self._levels: if no is None: raise ValueError( "Level '%s' does not exist, you have to create it by specifying a level no" % name ) else: old_no, old_color, old_icon = None, "", " " else: old_no, old_color, old_icon = self.level(name) if no is None: no = old_no if color is None: color = old_color if icon is None: icon = old_icon if not isinstance(no, int): raise ValueError( "Invalid level no, it should be an integer, not: '%s'" % type(no).__name__ ) if no < 0: raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no) self._levels[name] = Level(no, color, icon) with self._lock: for handler in self._handlers.values(): handler.update_format(color) return self.level(name)
[ "def", "level", "(", "self", ",", "name", ",", "no", "=", "None", ",", "color", "=", "None", ",", "icon", "=", "None", ")", ":", "if", "not", "isinstance", "(", "name", ",", "str", ")", ":", "raise", "ValueError", "(", "\"Invalid level name, it should ...
Add, update or retrieve a logging level.

Logging levels are defined by their ``name``, to which a severity
``no``, an ANSI ``color`` and an ``icon`` are associated; they can be
modified at run-time. To |log| to a custom level, you have to use its
name; the severity number is not linked back to the level name (this
implies that several levels can share the same severity).

To add a new level, all parameters should be passed so it can be
properly configured. To update an existing level, pass its ``name``
with the parameters to be changed. To retrieve level information, the
``name`` alone suffices.

Parameters
----------
name : |str|
    The name of the logging level.
no : |int|
    The severity of the level to be added or updated.
color : |str|
    The color markup of the level to be added or updated.
icon : |str|
    The icon of the level to be added or updated.

Returns
-------
``Level``
    A namedtuple containing information about the level.

Examples
--------
>>> logger.level("ERROR")
Level(no=40, color='<red><bold>', icon='❌')
>>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
>>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
>>> logger.log("CUSTOM", "Logging...")
15 @ Logging...
>>> logger.level("WARNING", icon=r"/!\\")
>>> logger.warning("Updated!")
30 /!\\ Updated!
[ "Add", "update", "or", "retrieve", "a", "logging", "level", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1125-L1212
train
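Creating and then partially updating levels as described above; the ``AUDIT`` name, number and color are an invented example:

from loguru import logger

# First creation requires a severity number; color and icon are optional.
logger.level("AUDIT", no=27, color="<cyan>", icon="A")
logger.log("AUDIT", "permissions changed for user {}", "alice")

# Updating an existing level only needs the fields that change.
logger.level("WARNING", icon="!")

# With the name alone, level() just retrieves the namedtuple.
print(logger.level("AUDIT"))  # Level(no=27, color='<cyan>', icon='A')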
Delgan/loguru
loguru/_logger.py
Logger.configure
def configure(self, *, handlers=None, levels=None, extra=None, activation=None): """Configure the core logger. It should be noted that ``extra`` values set using this function are available across all modules, so this is the best way to set overall default values. Parameters ---------- handlers : |list| of |dict|, optional A list of each handler to be added. The list should contain dicts of params passed to the |add| function as keyword arguments. If not ``None``, all previously added handlers are first removed. levels : |list| of |dict|, optional A list of each level to be added or updated. The list should contain dicts of params passed to the |level| function as keyword arguments. This will never remove previously created levels. extra : |dict|, optional A dict containing additional parameters bound to the core logger, useful to share common properties if you call |bind| in several of your files modules. If not ``None``, this will remove previously configured ``extra`` dict. activation : |list| of |tuple|, optional A list of ``(name, state)`` tuples which denotes which loggers should be enabled (if `state` is ``True``) or disabled (if `state` is ``False``). The calls to |enable| and |disable| are made accordingly to the list order. This will not modify previously activated loggers, so if you need a fresh start preprend your list with ``("", False)`` or ``("", True)``. Returns ------- :class:`list` of :class:`int` A list containing the identifiers of added sinks (if any). Examples -------- >>> logger.configure( ... handlers=[ ... dict(sink=sys.stderr, format="[{time}] {message}"), ... dict(sink="file.log", enqueue=True, serialize=True), ... ], ... levels=[dict(name="NEW", no=13, icon="¤", color="")], ... extra={"common_to_all": "default"}, ... activation=[("my_module.secret", False), ("another_library.module", True)], ... ) [1, 2] >>> # Set a default "extra" dict to logger across all modules, without "bind()" >>> extra = {"context": "foo"} >>> logger.configure(extra=extra) >>> logger.start(sys.stderr, format="{extra[context]} - {message}") >>> logger.info("Context without bind") >>> # => "foo - Context without bind" >>> logger.bind(context="bar").info("Suppress global context") >>> # => "bar - Suppress global context" """ if handlers is not None: self.remove() else: handlers = [] if levels is not None: for params in levels: self.level(**params) if extra is not None: with self._lock: self._extra_class.clear() self._extra_class.update(extra) if activation is not None: for name, state in activation: if state: self.enable(name) else: self.disable(name) return [self.add(**params) for params in handlers]
python
def configure(self, *, handlers=None, levels=None, extra=None, activation=None): """Configure the core logger. It should be noted that ``extra`` values set using this function are available across all modules, so this is the best way to set overall default values. Parameters ---------- handlers : |list| of |dict|, optional A list of each handler to be added. The list should contain dicts of params passed to the |add| function as keyword arguments. If not ``None``, all previously added handlers are first removed. levels : |list| of |dict|, optional A list of each level to be added or updated. The list should contain dicts of params passed to the |level| function as keyword arguments. This will never remove previously created levels. extra : |dict|, optional A dict containing additional parameters bound to the core logger, useful to share common properties if you call |bind| in several of your files modules. If not ``None``, this will remove previously configured ``extra`` dict. activation : |list| of |tuple|, optional A list of ``(name, state)`` tuples which denotes which loggers should be enabled (if `state` is ``True``) or disabled (if `state` is ``False``). The calls to |enable| and |disable| are made accordingly to the list order. This will not modify previously activated loggers, so if you need a fresh start preprend your list with ``("", False)`` or ``("", True)``. Returns ------- :class:`list` of :class:`int` A list containing the identifiers of added sinks (if any). Examples -------- >>> logger.configure( ... handlers=[ ... dict(sink=sys.stderr, format="[{time}] {message}"), ... dict(sink="file.log", enqueue=True, serialize=True), ... ], ... levels=[dict(name="NEW", no=13, icon="¤", color="")], ... extra={"common_to_all": "default"}, ... activation=[("my_module.secret", False), ("another_library.module", True)], ... ) [1, 2] >>> # Set a default "extra" dict to logger across all modules, without "bind()" >>> extra = {"context": "foo"} >>> logger.configure(extra=extra) >>> logger.start(sys.stderr, format="{extra[context]} - {message}") >>> logger.info("Context without bind") >>> # => "foo - Context without bind" >>> logger.bind(context="bar").info("Suppress global context") >>> # => "bar - Suppress global context" """ if handlers is not None: self.remove() else: handlers = [] if levels is not None: for params in levels: self.level(**params) if extra is not None: with self._lock: self._extra_class.clear() self._extra_class.update(extra) if activation is not None: for name, state in activation: if state: self.enable(name) else: self.disable(name) return [self.add(**params) for params in handlers]
[ "def", "configure", "(", "self", ",", "*", ",", "handlers", "=", "None", ",", "levels", "=", "None", ",", "extra", "=", "None", ",", "activation", "=", "None", ")", ":", "if", "handlers", "is", "not", "None", ":", "self", ".", "remove", "(", ")", ...
Configure the core logger.

It should be noted that ``extra`` values set using this function are
available across all modules, so this is the best way to set overall
default values.

Parameters
----------
handlers : |list| of |dict|, optional
    A list of each handler to be added. The list should contain dicts
    of params passed to the |add| function as keyword arguments. If not
    ``None``, all previously added handlers are first removed.
levels : |list| of |dict|, optional
    A list of each level to be added or updated. The list should
    contain dicts of params passed to the |level| function as keyword
    arguments. This will never remove previously created levels.
extra : |dict|, optional
    A dict containing additional parameters bound to the core logger,
    useful to share common properties if you call |bind| in several of
    your modules. If not ``None``, this will replace the previously
    configured ``extra`` dict.
activation : |list| of |tuple|, optional
    A list of ``(name, state)`` tuples which denote which loggers
    should be enabled (if ``state`` is ``True``) or disabled (if
    ``state`` is ``False``). The calls to |enable| and |disable| are
    made according to the list order. This will not modify previously
    activated loggers, so if you need a fresh start prepend your list
    with ``("", False)`` or ``("", True)``.

Returns
-------
:class:`list` of :class:`int`
    A list containing the identifiers of added sinks (if any).

Examples
--------
>>> logger.configure(
...     handlers=[
...         dict(sink=sys.stderr, format="[{time}] {message}"),
...         dict(sink="file.log", enqueue=True, serialize=True),
...     ],
...     levels=[dict(name="NEW", no=13, icon="¤", color="")],
...     extra={"common_to_all": "default"},
...     activation=[("my_module.secret", False), ("another_library.module", True)],
... )
[1, 2]

>>> # Set a default "extra" dict for the logger across all modules, without "bind()"
>>> extra = {"context": "foo"}
>>> logger.configure(extra=extra)
>>> logger.add(sys.stderr, format="{extra[context]} - {message}")
>>> logger.info("Context without bind")
>>> # => "foo - Context without bind"
>>> logger.bind(context="bar").info("Suppress global context")
>>> # => "bar - Suppress global context"
[ "Configure", "the", "core", "logger", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1255-L1330
train
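A sketch of a one-call setup with ``configure()``, assuming the documented handler/extra semantics; the sink paths, the ``app`` key and the ``noisy_dependency`` logger name are placeholders:

import sys
from loguru import logger

# Replaces the handler set wholesale, seeds the shared "extra" dict, and
# toggles per-module activation; returns the ids of the added sinks.
handler_ids = logger.configure(
    handlers=[
        dict(sink=sys.stderr, format="[{time:HH:mm:ss}] {extra[app]} | {message}"),
        dict(sink="app.log", serialize=True, enqueue=True),
    ],
    extra={"app": "demo"},                   # shared default, no bind() needed
    activation=[("noisy_dependency", False)],
)
logger.info("configured")  # stderr gets something like "[12:00:00] demo | configured"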
Delgan/loguru
loguru/_logger.py
Logger.parse
def parse(file, pattern, *, cast={}, chunk=2 ** 16): """ Parse raw logs and extract each entry as a |dict|. The logging format has to be specified as the regex ``pattern``, it will then be used to parse the ``file`` and retrieve each entries based on the named groups present in the regex. Parameters ---------- file : |str|, |Path| or |file-like object|_ The path of the log file to be parsed, or alternatively an already opened file object. pattern : |str| or |re.Pattern|_ The regex to use for logs parsing, it should contain named groups which will be included in the returned dict. cast : |function|_ or |dict|, optional A function that should convert in-place the regex groups parsed (a dict of string values) to more appropriate types. If a dict is passed, its should be a mapping between keys of parsed log dict and the function that should be used to convert the associated value. chunk : |int|, optional The number of bytes read while iterating through the logs, this avoid having to load the whole file in memory. Yields ------ :class:`dict` The dict mapping regex named groups to matched values, as returned by |match.groupdict| and optionally converted according to ``cast`` argument. Examples -------- >>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)" # If log format is "{level.no} - {message}" >>> for e in logger.parse("file.log", reg): # A file line could be "10 - A debug message" ... print(e) # => {'lvl': '10', 'msg': 'A debug message'} ... >>> caster = dict(lvl=int) # Parse 'lvl' key as an integer >>> for e in logger.parse("file.log", reg, cast=caster): ... print(e) # => {'lvl': 10, 'msg': 'A debug message'} >>> def cast(groups): ... if "date" in groups: ... groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S") ... >>> with open("file.log") as file: ... for log in logger.parse(file, reg, cast=cast): ... print(log["date"], log["something_else"]) """ if isinstance(file, (str, PathLike)): should_close = True fileobj = open(str(file)) elif hasattr(file, "read") and callable(file.read): should_close = False fileobj = file else: raise ValueError( "Invalid file, it should be a string path or a file object, not: '%s'" % type(file).__name__ ) if isinstance(cast, dict): def cast_function(groups): for key, converter in cast.items(): if key in groups: groups[key] = converter(groups[key]) elif callable(cast): cast_function = cast else: raise ValueError( "Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__ ) try: regex = re.compile(pattern) except TypeError: raise ValueError( "Invalid pattern, it should be a string or a compiled regex, not: '%s'" % type(pattern).__name__ ) matches = Logger._find_iter(fileobj, regex, chunk) for match in matches: groups = match.groupdict() cast_function(groups) yield groups if should_close: fileobj.close()
python
def parse(file, pattern, *, cast={}, chunk=2 ** 16): """ Parse raw logs and extract each entry as a |dict|. The logging format has to be specified as the regex ``pattern``, it will then be used to parse the ``file`` and retrieve each entries based on the named groups present in the regex. Parameters ---------- file : |str|, |Path| or |file-like object|_ The path of the log file to be parsed, or alternatively an already opened file object. pattern : |str| or |re.Pattern|_ The regex to use for logs parsing, it should contain named groups which will be included in the returned dict. cast : |function|_ or |dict|, optional A function that should convert in-place the regex groups parsed (a dict of string values) to more appropriate types. If a dict is passed, its should be a mapping between keys of parsed log dict and the function that should be used to convert the associated value. chunk : |int|, optional The number of bytes read while iterating through the logs, this avoid having to load the whole file in memory. Yields ------ :class:`dict` The dict mapping regex named groups to matched values, as returned by |match.groupdict| and optionally converted according to ``cast`` argument. Examples -------- >>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)" # If log format is "{level.no} - {message}" >>> for e in logger.parse("file.log", reg): # A file line could be "10 - A debug message" ... print(e) # => {'lvl': '10', 'msg': 'A debug message'} ... >>> caster = dict(lvl=int) # Parse 'lvl' key as an integer >>> for e in logger.parse("file.log", reg, cast=caster): ... print(e) # => {'lvl': 10, 'msg': 'A debug message'} >>> def cast(groups): ... if "date" in groups: ... groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S") ... >>> with open("file.log") as file: ... for log in logger.parse(file, reg, cast=cast): ... print(log["date"], log["something_else"]) """ if isinstance(file, (str, PathLike)): should_close = True fileobj = open(str(file)) elif hasattr(file, "read") and callable(file.read): should_close = False fileobj = file else: raise ValueError( "Invalid file, it should be a string path or a file object, not: '%s'" % type(file).__name__ ) if isinstance(cast, dict): def cast_function(groups): for key, converter in cast.items(): if key in groups: groups[key] = converter(groups[key]) elif callable(cast): cast_function = cast else: raise ValueError( "Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__ ) try: regex = re.compile(pattern) except TypeError: raise ValueError( "Invalid pattern, it should be a string or a compiled regex, not: '%s'" % type(pattern).__name__ ) matches = Logger._find_iter(fileobj, regex, chunk) for match in matches: groups = match.groupdict() cast_function(groups) yield groups if should_close: fileobj.close()
[ "def", "parse", "(", "file", ",", "pattern", ",", "*", ",", "cast", "=", "{", "}", ",", "chunk", "=", "2", "**", "16", ")", ":", "if", "isinstance", "(", "file", ",", "(", "str", ",", "PathLike", ")", ")", ":", "should_close", "=", "True", "fil...
Parse raw logs and extract each entry as a |dict|.

The logging format has to be specified as the regex ``pattern``; it
will then be used to parse the ``file`` and retrieve each entry based
on the named groups present in the regex.

Parameters
----------
file : |str|, |Path| or |file-like object|_
    The path of the log file to be parsed, or alternatively an already
    opened file object.
pattern : |str| or |re.Pattern|_
    The regex to use for log parsing; it should contain named groups
    which will be included in the returned dict.
cast : |function|_ or |dict|, optional
    A function that should convert, in place, the parsed regex groups
    (a dict of string values) to more appropriate types. If a dict is
    passed, it should be a mapping between keys of the parsed log dict
    and the functions that should be used to convert the associated
    values.
chunk : |int|, optional
    The number of bytes read while iterating through the logs; this
    avoids having to load the whole file into memory.

Yields
------
:class:`dict`
    The dict mapping regex named groups to matched values, as returned
    by |match.groupdict| and optionally converted according to the
    ``cast`` argument.

Examples
--------
>>> reg = r"(?P<lvl>[0-9]+) - (?P<msg>.*)"  # If log format is "{level.no} - {message}"
>>> for e in logger.parse("file.log", reg):  # A file line could be "10 - A debug message"
...     print(e)  # => {'lvl': '10', 'msg': 'A debug message'}
...
>>> caster = dict(lvl=int)  # Parse 'lvl' key as an integer
>>> for e in logger.parse("file.log", reg, cast=caster):
...     print(e)  # => {'lvl': 10, 'msg': 'A debug message'}

>>> def cast(groups):
...     if "date" in groups:
...         groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
...
>>> with open("file.log") as file:
...     for log in logger.parse(file, reg, cast=cast):
...         print(log["date"], log["something_else"])
[ "Parse", "raw", "logs", "and", "extract", "each", "entry", "as", "a", "|dict|", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1359-L1450
train
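Reading a log file back with ``parse()``; a sketch that assumes ``app.log`` was written with a ``"{time} | {level} | {message}"``-style format, and uses the dict form of ``cast``:

from datetime import datetime
from loguru import logger

# Named groups become dict keys; cast converts selected values in place.
pattern = r"(?P<time>[^|]+) \| (?P<level>[^|]+) \| (?P<message>.*)"
caster = {
    "time": lambda t: datetime.strptime(t.strip(), "%Y-%m-%d %H:%M:%S"),
    "level": str.strip,
}

for entry in logger.parse("app.log", pattern, cast=caster):
    print(entry["time"], entry["level"], entry["message"])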
Delgan/loguru
loguru/_logger.py
Logger.log
def log(_self, _level, _message, *args, **kwargs): r"""Log ``_message.format(*args, **kwargs)`` with severity ``_level``.""" logger = _self.opt( exception=_self._exception, record=_self._record, lazy=_self._lazy, ansi=_self._ansi, raw=_self._raw, depth=_self._depth + 1, ) logger._make_log_function(_level)(logger, _message, *args, **kwargs)
python
def log(_self, _level, _message, *args, **kwargs): r"""Log ``_message.format(*args, **kwargs)`` with severity ``_level``.""" logger = _self.opt( exception=_self._exception, record=_self._record, lazy=_self._lazy, ansi=_self._ansi, raw=_self._raw, depth=_self._depth + 1, ) logger._make_log_function(_level)(logger, _message, *args, **kwargs)
[ "def", "log", "(", "_self", ",", "_level", ",", "_message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", "=", "_self", ".", "opt", "(", "exception", "=", "_self", ".", "_exception", ",", "record", "=", "_self", ".", "_record", ","...
r"""Log ``_message.format(*args, **kwargs)`` with severity ``_level``.
[ "r", "Log", "_message", ".", "format", "(", "*", "args", "**", "kwargs", ")", "with", "severity", "_level", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1600-L1610
train
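``log()`` is the generic entry point the level-named methods delegate to; a small sketch with an invented ``NOTICE`` level, using the str.format-style arguments documented above:

from loguru import logger

logger.level("NOTICE", no=22)                           # custom level, targeted by name
logger.log("NOTICE", "cache warmed in {:.1f}s", 0.42)   # message.format(*args, **kwargs)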
Delgan/loguru
loguru/_logger.py
Logger.start
def start(self, *args, **kwargs): """Deprecated function to |add| a new handler. Warnings -------- .. deprecated:: 0.2.2 ``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less confusing name. """ warnings.warn( "The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning ) return self.add(*args, **kwargs)
python
def start(self, *args, **kwargs): """Deprecated function to |add| a new handler. Warnings -------- .. deprecated:: 0.2.2 ``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less confusing name. """ warnings.warn( "The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning ) return self.add(*args, **kwargs)
[ "def", "start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"The 'start()' method is deprecated, please use 'add()' instead\"", ",", "DeprecationWarning", ")", "return", "self", ".", "add", "(", "*", "args", ...
Deprecated function to |add| a new handler. Warnings -------- .. deprecated:: 0.2.2 ``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less confusing name.
[ "Deprecated", "function", "to", "|add|", "a", "new", "handler", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1624-L1636
train
Delgan/loguru
loguru/_logger.py
Logger.stop
def stop(self, *args, **kwargs): """Deprecated function to |remove| an existing handler. Warnings -------- .. deprecated:: 0.2.2 ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less confusing name. """ warnings.warn( "The 'stop()' method is deprecated, please use 'remove()' instead", DeprecationWarning ) return self.remove(*args, **kwargs)
python
def stop(self, *args, **kwargs): """Deprecated function to |remove| an existing handler. Warnings -------- .. deprecated:: 0.2.2 ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less confusing name. """ warnings.warn( "The 'stop()' method is deprecated, please use 'remove()' instead", DeprecationWarning ) return self.remove(*args, **kwargs)
[ "def", "stop", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"The 'stop()' method is deprecated, please use 'remove()' instead\"", ",", "DeprecationWarning", ")", "return", "self", ".", "remove", "(", "*", "arg...
Deprecated function to |remove| an existing handler. Warnings -------- .. deprecated:: 0.2.2 ``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less confusing name.
[ "Deprecated", "function", "to", "|remove|", "an", "existing", "handler", "." ]
6571879c37904e3a18567e694d70651c6886b860
https://github.com/Delgan/loguru/blob/6571879c37904e3a18567e694d70651c6886b860/loguru/_logger.py#L1638-L1650
train
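A small migration sketch covering both deprecated shims above; ``add()`` returns a handler id that ``remove()`` accepts, mirroring the old ``start()``/``stop()`` pair:

import sys
from loguru import logger

# Old, deprecated:  handler_id = logger.start(sys.stderr); logger.stop(handler_id)
handler_id = logger.add(sys.stderr, level="INFO")
logger.info("routed through the new handler")
logger.remove(handler_id)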
graphql-python/graphene-django
graphene_django/rest_framework/serializer_converter.py
convert_serializer_field
def convert_serializer_field(field, is_input=True): """ Converts a Django REST framework field to a GraphQL field and marks the field as required if we are creating an input type and the field itself is required """ graphql_type = get_graphene_type_from_serializer_field(field) args = [] kwargs = {"description": field.help_text, "required": is_input and field.required} # if it is a tuple or a list it means that we are returning # the graphql type and the child type if isinstance(graphql_type, (list, tuple)): kwargs["of_type"] = graphql_type[1] graphql_type = graphql_type[0] if isinstance(field, serializers.ModelSerializer): if is_input: graphql_type = convert_serializer_to_input_type(field.__class__) else: global_registry = get_global_registry() field_model = field.Meta.model args = [global_registry.get_type_for_model(field_model)] elif isinstance(field, serializers.ListSerializer): field = field.child if is_input: kwargs["of_type"] = convert_serializer_to_input_type(field.__class__) else: del kwargs["of_type"] global_registry = get_global_registry() field_model = field.Meta.model args = [global_registry.get_type_for_model(field_model)] return graphql_type(*args, **kwargs)
python
def convert_serializer_field(field, is_input=True): """ Converts a Django REST framework field to a GraphQL field and marks the field as required if we are creating an input type and the field itself is required """ graphql_type = get_graphene_type_from_serializer_field(field) args = [] kwargs = {"description": field.help_text, "required": is_input and field.required} # if it is a tuple or a list it means that we are returning # the graphql type and the child type if isinstance(graphql_type, (list, tuple)): kwargs["of_type"] = graphql_type[1] graphql_type = graphql_type[0] if isinstance(field, serializers.ModelSerializer): if is_input: graphql_type = convert_serializer_to_input_type(field.__class__) else: global_registry = get_global_registry() field_model = field.Meta.model args = [global_registry.get_type_for_model(field_model)] elif isinstance(field, serializers.ListSerializer): field = field.child if is_input: kwargs["of_type"] = convert_serializer_to_input_type(field.__class__) else: del kwargs["of_type"] global_registry = get_global_registry() field_model = field.Meta.model args = [global_registry.get_type_for_model(field_model)] return graphql_type(*args, **kwargs)
[ "def", "convert_serializer_field", "(", "field", ",", "is_input", "=", "True", ")", ":", "graphql_type", "=", "get_graphene_type_from_serializer_field", "(", "field", ")", "args", "=", "[", "]", "kwargs", "=", "{", "\"description\"", ":", "field", ".", "help_tex...
Converts a Django REST framework field to a GraphQL field and marks the field as required if we are creating an input type and the field itself is required
[ "Converts", "a", "django", "rest", "frameworks", "field", "to", "a", "graphql", "field", "and", "marks", "the", "field", "as", "required", "if", "we", "are", "creating", "an", "input", "type", "and", "the", "field", "itself", "is", "required" ]
20160113948b4167b61dbdaa477bb301227aac2e
https://github.com/graphql-python/graphene-django/blob/20160113948b4167b61dbdaa477bb301227aac2e/graphene_django/rest_framework/serializer_converter.py#L21-L56
train
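A hedged usage sketch: converting a plain DRF ``CharField`` both ways. The exact Graphene type returned depends on the converter registry, so the comments are indicative only.

from rest_framework import serializers
from graphene_django.rest_framework.serializer_converter import convert_serializer_field

field = serializers.CharField(help_text="Display name", required=True)

input_field = convert_serializer_field(field, is_input=True)    # required=True propagated from DRF
output_field = convert_serializer_field(field, is_input=False)  # output fields are never required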
graphql-python/graphene-django
graphene_django/settings.py
perform_import
def perform_import(val, setting_name): """ If the given setting is a string import notation, then perform the necessary import or imports. """ if val is None: return None elif isinstance(val, six.string_types): return import_from_string(val, setting_name) elif isinstance(val, (list, tuple)): return [import_from_string(item, setting_name) for item in val] return val
python
def perform_import(val, setting_name): """ If the given setting is a string import notation, then perform the necessary import or imports. """ if val is None: return None elif isinstance(val, six.string_types): return import_from_string(val, setting_name) elif isinstance(val, (list, tuple)): return [import_from_string(item, setting_name) for item in val] return val
[ "def", "perform_import", "(", "val", ",", "setting_name", ")", ":", "if", "val", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "return", "import_from_string", "(", "val", ",", "setting_nam...
If the given setting is a string import notation, then perform the necessary import or imports.
[ "If", "the", "given", "setting", "is", "a", "string", "import", "notation", "then", "perform", "the", "necessary", "import", "or", "imports", "." ]
20160113948b4167b61dbdaa477bb301227aac2e
https://github.com/graphql-python/graphene-django/blob/20160113948b4167b61dbdaa477bb301227aac2e/graphene_django/settings.py#L47-L58
train
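A quick sketch of the shapes ``perform_import`` accepts; "EXAMPLE_SETTING" is just an illustrative setting name used in error messages.

from graphene_django.settings import perform_import

perform_import(None, "EXAMPLE_SETTING")            # -> None
perform_import("os.path.join", "EXAMPLE_SETTING")  # -> the os.path.join function
perform_import(["os.path.join", "os.path.split"], "EXAMPLE_SETTING")  # -> [join, split]
perform_import(42, "EXAMPLE_SETTING")              # non-string values pass through unchanged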
graphql-python/graphene-django
graphene_django/filter/filterset.py
custom_filterset_factory
def custom_filterset_factory(model, filterset_base_class=FilterSet, **meta): """ Create a filterset for the given model using the provided meta data """ meta.update({"model": model}) meta_class = type(str("Meta"), (object,), meta) filterset = type( str("%sFilterSet" % model._meta.object_name), (filterset_base_class, GrapheneFilterSetMixin), {"Meta": meta_class}, ) return filterset
python
def custom_filterset_factory(model, filterset_base_class=FilterSet, **meta): """ Create a filterset for the given model using the provided meta data """ meta.update({"model": model}) meta_class = type(str("Meta"), (object,), meta) filterset = type( str("%sFilterSet" % model._meta.object_name), (filterset_base_class, GrapheneFilterSetMixin), {"Meta": meta_class}, ) return filterset
[ "def", "custom_filterset_factory", "(", "model", ",", "filterset_base_class", "=", "FilterSet", ",", "*", "*", "meta", ")", ":", "meta", ".", "update", "(", "{", "\"model\"", ":", "model", "}", ")", "meta_class", "=", "type", "(", "str", "(", "\"Meta\"", ...
Create a filterset for the given model using the provided meta data
[ "Create", "a", "filterset", "for", "the", "given", "model", "using", "the", "provided", "meta", "data" ]
20160113948b4167b61dbdaa477bb301227aac2e
https://github.com/graphql-python/graphene-django/blob/20160113948b4167b61dbdaa477bb301227aac2e/graphene_django/filter/filterset.py#L95-L105
train
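A hedged sketch; ``Article`` stands in for any Django model, and the extra ``fields`` keyword flows into the generated ``Meta`` class.

# `Article` and its import path are hypothetical, for illustration only.
from graphene_django.filter.filterset import custom_filterset_factory
from myapp.models import Article

ArticleFilterSet = custom_filterset_factory(Article, fields=["title", "published"])
print(ArticleFilterSet.__name__)  # ArticleFilterSet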
graphql-python/graphene-django
graphene_django/filter/filterset.py
GlobalIDFilter.filter
def filter(self, qs, value): """ Convert the filter value to a primary key before filtering """ _id = None if value is not None: _, _id = from_global_id(value) return super(GlobalIDFilter, self).filter(qs, _id)
python
def filter(self, qs, value): """ Convert the filter value to a primary key before filtering """ _id = None if value is not None: _, _id = from_global_id(value) return super(GlobalIDFilter, self).filter(qs, _id)
[ "def", "filter", "(", "self", ",", "qs", ",", "value", ")", ":", "_id", "=", "None", "if", "value", "is", "not", "None", ":", "_", ",", "_id", "=", "from_global_id", "(", "value", ")", "return", "super", "(", "GlobalIDFilter", ",", "self", ")", "."...
Convert the filter value to a primary key before filtering
[ "Convert", "the", "filter", "value", "to", "a", "primary", "key", "before", "filtering" ]
20160113948b4167b61dbdaa477bb301227aac2e
https://github.com/graphql-python/graphene-django/blob/20160113948b4167b61dbdaa477bb301227aac2e/graphene_django/filter/filterset.py#L16-L21
train
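The filter above leans on relay's global-ID encoding, which is base64 of "TypeName:pk"; a small demonstration of the decoding step it performs:

from graphql_relay import from_global_id

node_type, pk = from_global_id("QXJ0aWNsZTox")  # base64 of "Article:1"
print(node_type, pk)  # Article 1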
graphql-python/graphene-django
graphene_django/filter/utils.py
get_filtering_args_from_filterset
def get_filtering_args_from_filterset(filterset_class, type): """ Inspect a FilterSet and produce the arguments to pass to a Graphene Field. These arguments will be available to filter against in the GraphQL API. """ from ..forms.converter import convert_form_field args = {} for name, filter_field in six.iteritems(filterset_class.base_filters): field_type = convert_form_field(filter_field.field).Argument() field_type.description = filter_field.label args[name] = field_type return args
python
def get_filtering_args_from_filterset(filterset_class, type): """ Inspect a FilterSet and produce the arguments to pass to a Graphene Field. These arguments will be available to filter against in the GraphQL API. """ from ..forms.converter import convert_form_field args = {} for name, filter_field in six.iteritems(filterset_class.base_filters): field_type = convert_form_field(filter_field.field).Argument() field_type.description = filter_field.label args[name] = field_type return args
[ "def", "get_filtering_args_from_filterset", "(", "filterset_class", ",", "type", ")", ":", "from", ".", ".", "forms", ".", "converter", "import", "convert_form_field", "args", "=", "{", "}", "for", "name", ",", "filter_field", "in", "six", ".", "iteritems", "(...
Inspect a FilterSet and produce the arguments to pass to a Graphene Field. These arguments will be available to filter against in the GraphQL API.
[ "Inspect", "a", "FilterSet", "and", "produce", "the", "arguments", "to", "pass", "to", "a", "Graphene", "Field", ".", "These", "arguments", "will", "be", "available", "to", "filter", "against", "in", "the", "GraphQL" ]
20160113948b4167b61dbdaa477bb301227aac2e
https://github.com/graphql-python/graphene-django/blob/20160113948b4167b61dbdaa477bb301227aac2e/graphene_django/filter/utils.py#L6-L19
train
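A hedged sketch with a hypothetical declarative FilterSet; note the second parameter is unused by the function body, so ``None`` suffices here.

import django_filters
from graphene_django.filter.utils import get_filtering_args_from_filterset

class ArticleFilter(django_filters.FilterSet):
    title = django_filters.CharFilter(lookup_expr="icontains", label="Title contains")

args = get_filtering_args_from_filterset(ArticleFilter, None)
# -> {"title": <graphene Argument whose description is "Title contains">}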
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient._make_topics_result
def _make_topics_result(f, futmap): """ Map per-topic results to per-topic futures in futmap. The result value of each (successful) future is None. """ try: result = f.result() for topic, error in result.items(): fut = futmap.get(topic, None) if fut is None: raise RuntimeError("Topic {} not found in future-map: {}".format(topic, futmap)) if error is not None: # Topic-level exception fut.set_exception(KafkaException(error)) else: # Topic-level success fut.set_result(None) except Exception as e: # Request-level exception, raise the same for all topics for topic, fut in futmap.items(): fut.set_exception(e)
python
def _make_topics_result(f, futmap): """ Map per-topic results to per-topic futures in futmap. The result value of each (successful) future is None. """ try: result = f.result() for topic, error in result.items(): fut = futmap.get(topic, None) if fut is None: raise RuntimeError("Topic {} not found in future-map: {}".format(topic, futmap)) if error is not None: # Topic-level exception fut.set_exception(KafkaException(error)) else: # Topic-level success fut.set_result(None) except Exception as e: # Request-level exception, raise the same for all topics for topic, fut in futmap.items(): fut.set_exception(e)
[ "def", "_make_topics_result", "(", "f", ",", "futmap", ")", ":", "try", ":", "result", "=", "f", ".", "result", "(", ")", "for", "topic", ",", "error", "in", "result", ".", "items", "(", ")", ":", "fut", "=", "futmap", ".", "get", "(", "topic", "...
Map per-topic results to per-topic futures in futmap. The result value of each (successful) future is None.
[ "Map", "per", "-", "topic", "results", "to", "per", "-", "topic", "futures", "in", "futmap", ".", "The", "result", "value", "of", "each", "(", "successful", ")", "future", "is", "None", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L219-L240
train
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient._make_resource_result
def _make_resource_result(f, futmap): """ Map per-resource results to per-resource futures in futmap. The result value of each (successful) future is a ConfigResource. """ try: result = f.result() for resource, configs in result.items(): fut = futmap.get(resource, None) if fut is None: raise RuntimeError("Resource {} not found in future-map: {}".format(resource, futmap)) if resource.error is not None: # Resource-level exception fut.set_exception(KafkaException(resource.error)) else: # Resource-level success # configs will be a dict for describe_configs() # and None for alter_configs() fut.set_result(configs) except Exception as e: # Request-level exception, raise the same for all resources for resource, fut in futmap.items(): fut.set_exception(e)
python
def _make_resource_result(f, futmap): """ Map per-resource results to per-resource futures in futmap. The result value of each (successful) future is a ConfigResource. """ try: result = f.result() for resource, configs in result.items(): fut = futmap.get(resource, None) if fut is None: raise RuntimeError("Resource {} not found in future-map: {}".format(resource, futmap)) if resource.error is not None: # Resource-level exception fut.set_exception(KafkaException(resource.error)) else: # Resource-level success # configs will be a dict for describe_configs() # and None for alter_configs() fut.set_result(configs) except Exception as e: # Request-level exception, raise the same for all resources for resource, fut in futmap.items(): fut.set_exception(e)
[ "def", "_make_resource_result", "(", "f", ",", "futmap", ")", ":", "try", ":", "result", "=", "f", ".", "result", "(", ")", "for", "resource", ",", "configs", "in", "result", ".", "items", "(", ")", ":", "fut", "=", "futmap", ".", "get", "(", "reso...
Map per-resource results to per-resource futures in futmap. The result value of each (successful) future is a ConfigResource.
[ "Map", "per", "-", "resource", "results", "to", "per", "-", "resource", "futures", "in", "futmap", ".", "The", "result", "value", "of", "each", "(", "successful", ")", "future", "is", "a", "ConfigResource", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L243-L265
train
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient._make_futures
def _make_futures(futmap_keys, class_check, make_result_fn): """ Create futures and a futuremap for the keys in futmap_keys, and create a request-level future to be passed to the C API. """ futmap = {} for key in futmap_keys: if class_check is not None and not isinstance(key, class_check): raise ValueError("Expected list of {}".format(class_check)) futmap[key] = concurrent.futures.Future() if not futmap[key].set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") # Create an internal future for the entire request, # this future will trigger _make_..._result() and set result/exception # per topic,future in futmap. f = concurrent.futures.Future() f.add_done_callback(lambda f: make_result_fn(f, futmap)) if not f.set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") return f, futmap
python
def _make_futures(futmap_keys, class_check, make_result_fn): """ Create futures and a futuremap for the keys in futmap_keys, and create a request-level future to be passed to the C API. """ futmap = {} for key in futmap_keys: if class_check is not None and not isinstance(key, class_check): raise ValueError("Expected list of {}".format(class_check)) futmap[key] = concurrent.futures.Future() if not futmap[key].set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") # Create an internal future for the entire request, # this future will trigger _make_..._result() and set result/exception # per topic,future in futmap. f = concurrent.futures.Future() f.add_done_callback(lambda f: make_result_fn(f, futmap)) if not f.set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") return f, futmap
[ "def", "_make_futures", "(", "futmap_keys", ",", "class_check", ",", "make_result_fn", ")", ":", "futmap", "=", "{", "}", "for", "key", "in", "futmap_keys", ":", "if", "class_check", "is", "not", "None", "and", "not", "isinstance", "(", "key", ",", "class_...
Create futures and a futuremap for the keys in futmap_keys, and create a request-level future to be passed to the C API.
[ "Create", "futures", "and", "a", "futuremap", "for", "the", "keys", "in", "futmap_keys", "and", "create", "a", "request", "-", "level", "future", "to", "be", "bassed", "to", "the", "C", "API", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L268-L290
train
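A Kafka-free sketch of the fan-out pattern used above: one request-level future whose done-callback resolves every per-key future in the map. Names here are illustrative only.

import concurrent.futures

def make_fanout(keys):
    futmap = {k: concurrent.futures.Future() for k in keys}
    for fut in futmap.values():
        fut.set_running_or_notify_cancel()

    request = concurrent.futures.Future()
    request.set_running_or_notify_cancel()

    def resolve(f):
        # Expects a dict of <key, error-or-None> as the request result.
        for key, error in f.result().items():
            if error is not None:
                futmap[key].set_exception(RuntimeError(error))
            else:
                futmap[key].set_result(None)

    request.add_done_callback(resolve)
    return request, futmap

request, futmap = make_fanout(["topic-a", "topic-b"])
request.set_result({"topic-a": None, "topic-b": "simulated failure"})
print(futmap["topic-a"].result())     # None
print(futmap["topic-b"].exception())  # RuntimeError('simulated failure')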
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient.create_topics
def create_topics(self, new_topics, **kwargs): """ Create new topics in cluster. The future result() value is None. :param list(NewTopic) new_topics: New topics to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreateTopics request will block on the broker waiting for the topic creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the topic. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures([x.topic for x in new_topics], None, AdminClient._make_topics_result) super(AdminClient, self).create_topics(new_topics, f, **kwargs) return futmap
python
def create_topics(self, new_topics, **kwargs): """ Create new topics in cluster. The future result() value is None. :param list(NewTopic) new_topics: New topics to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreateTopics request will block on the broker waiting for the topic creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the topic. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures([x.topic for x in new_topics], None, AdminClient._make_topics_result) super(AdminClient, self).create_topics(new_topics, f, **kwargs) return futmap
[ "def", "create_topics", "(", "self", ",", "new_topics", ",", "*", "*", "kwargs", ")", ":", "f", ",", "futmap", "=", "AdminClient", ".", "_make_futures", "(", "[", "x", ".", "topic", "for", "x", "in", "new_topics", "]", ",", "None", ",", "AdminClient", ...
Create new topics in cluster. The future result() value is None. :param list(NewTopic) new_topics: New topics to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreateTopics request will block on the broker waiting for the topic creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the topic. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input.
[ "Create", "new", "topics", "in", "cluster", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L292-L323
train
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient.delete_topics
def delete_topics(self, topics, **kwargs): """ Delete topics. The future result() value is None. :param list(str) topics: Topics to mark for deletion. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the DeleteTopics request will block on the broker waiting for the topic deletion to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures(topics, None, AdminClient._make_topics_result) super(AdminClient, self).delete_topics(topics, f, **kwargs) return futmap
python
def delete_topics(self, topics, **kwargs): """ Delete topics. The future result() value is None. :param list(str) topics: Topics to mark for deletion. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the DeleteTopics request will block on the broker waiting for the topic deletion to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures(topics, None, AdminClient._make_topics_result) super(AdminClient, self).delete_topics(topics, f, **kwargs) return futmap
[ "def", "delete_topics", "(", "self", ",", "topics", ",", "*", "*", "kwargs", ")", ":", "f", ",", "futmap", "=", "AdminClient", ".", "_make_futures", "(", "topics", ",", "None", ",", "AdminClient", ".", "_make_topics_result", ")", "super", "(", "AdminClient...
Delete topics. The future result() value is None. :param list(str) topics: Topics to mark for deletion. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the DeleteTopics request will block on the broker waiting for the topic deletion to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input.
[ "Delete", "topics", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L325-L353
train
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient.create_partitions
def create_partitions(self, new_partitions, **kwargs): """ Create additional partitions for the given topics. The future result() value is None. :param list(NewPartitions) new_partitions: New partitions to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreatePartitions request will block on the broker waiting for the partition creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the partitions. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures([x.topic for x in new_partitions], None, AdminClient._make_topics_result) super(AdminClient, self).create_partitions(new_partitions, f, **kwargs) return futmap
python
def create_partitions(self, new_partitions, **kwargs): """ Create additional partitions for the given topics. The future result() value is None. :param list(NewPartitions) new_partitions: New partitions to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreatePartitions request will block on the broker waiting for the partition creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the partitions. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures([x.topic for x in new_partitions], None, AdminClient._make_topics_result) super(AdminClient, self).create_partitions(new_partitions, f, **kwargs) return futmap
[ "def", "create_partitions", "(", "self", ",", "new_partitions", ",", "*", "*", "kwargs", ")", ":", "f", ",", "futmap", "=", "AdminClient", ".", "_make_futures", "(", "[", "x", ".", "topic", "for", "x", "in", "new_partitions", "]", ",", "None", ",", "Ad...
Create additional partitions for the given topics. The future result() value is None. :param list(NewPartitions) new_partitions: New partitions to be created. :param float operation_timeout: Set broker's operation timeout in seconds, controlling how long the CreatePartitions request will block on the broker waiting for the partition creation to propagate in the cluster. A value of 0 returns immediately. Default: 0 :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without creating the partitions. Default: False :returns: a dict of futures for each topic, keyed by the topic name. :rtype: dict(<topic_name, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input.
[ "Create", "additional", "partitions", "for", "the", "given", "topics", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L355-L386
train
confluentinc/confluent-kafka-python
confluent_kafka/admin/__init__.py
AdminClient.describe_configs
def describe_configs(self, resources, **kwargs): """ Get configuration for the specified resources. The future result() value is a dict(<configname, ConfigEntry>). :warning: Multiple resources and resource types may be requested, but at most one resource of type RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource. :param list(ConfigResource) resources: Resources to get configuration for. :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without performing the operation. Default: False :returns: a dict of futures for each resource, keyed by the ConfigResource. :rtype: dict(<ConfigResource, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures(resources, ConfigResource, AdminClient._make_resource_result) super(AdminClient, self).describe_configs(resources, f, **kwargs) return futmap
python
def describe_configs(self, resources, **kwargs): """ Get configuration for the specified resources. The future result() value is a dict(<configname, ConfigEntry>). :warning: Multiple resources and resource types may be requested, but at most one resource of type RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource. :param list(ConfigResource) resources: Resources to get configuration for. :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without performing the operation. Default: False :returns: a dict of futures for each resource, keyed by the ConfigResource. :rtype: dict(<ConfigResource, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input. """ f, futmap = AdminClient._make_futures(resources, ConfigResource, AdminClient._make_resource_result) super(AdminClient, self).describe_configs(resources, f, **kwargs) return futmap
[ "def", "describe_configs", "(", "self", ",", "resources", ",", "*", "*", "kwargs", ")", ":", "f", ",", "futmap", "=", "AdminClient", ".", "_make_futures", "(", "resources", ",", "ConfigResource", ",", "AdminClient", ".", "_make_resource_result", ")", "super", ...
Get configuration for the specified resources. The future result() value is a dict(<configname, ConfigEntry>). :warning: Multiple resources and resource types may be requested, but at most one resource of type RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource. :param list(ConfigResource) resources: Resources to get configuration for. :param float request_timeout: Set the overall request timeout in seconds, including broker lookup, request transmission, operation time on broker, and response. Default: `socket.timeout.ms/1000.0` :param bool validate_only: Tell broker to only validate the request, without performing the operation. Default: False :returns: a dict of futures for each resource, keyed by the ConfigResource. :rtype: dict(<ConfigResource, future>) :raises KafkaException: Operation failed locally or on broker. :raises TypeError: Invalid input. :raises ValueError: Invalid input.
[ "Get", "configuration", "for", "the", "specified", "resources", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/admin/__init__.py#L388-L419
train
confluentinc/confluent-kafka-python
confluent_kafka/avro/load.py
loads
def loads(schema_str): """ Parse a schema given a schema string """ try: if sys.version_info[0] < 3: return schema.parse(schema_str) else: return schema.Parse(schema_str) except schema.SchemaParseException as e: raise ClientError("Schema parse failed: %s" % (str(e)))
python
def loads(schema_str): """ Parse a schema given a schema string """ try: if sys.version_info[0] < 3: return schema.parse(schema_str) else: return schema.Parse(schema_str) except schema.SchemaParseException as e: raise ClientError("Schema parse failed: %s" % (str(e)))
[ "def", "loads", "(", "schema_str", ")", ":", "try", ":", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "return", "schema", ".", "parse", "(", "schema_str", ")", "else", ":", "return", "schema", ".", "Parse", "(", "schema_str", ")", ...
Parse a schema given a schema string
[ "Parse", "a", "schema", "given", "a", "schema", "string" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/load.py#L23-L31
train
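``loads`` is what turns a schema string into the parsed schema object the producer/consumer classes below expect; a minimal example:

from confluent_kafka import avro

user_schema = avro.loads('''
{
  "namespace": "example",
  "type": "record",
  "name": "User",
  "fields": [{"name": "name", "type": "string"}]
}
''')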
confluentinc/confluent-kafka-python
confluent_kafka/avro/__init__.py
AvroProducer.produce
def produce(self, **kwargs): """ Asynchronously sends a message to Kafka by encoding with specified or default avro schema. :param str topic: topic name :param object value: An object to serialize :param str value_schema: Avro schema for value :param object key: An object to serialize :param str key_schema: Avro schema for key Plus any other parameters accepted by confluent_kafka.Producer.produce :raises SerializerError: On serialization failure :raises BufferError: If producer queue is full. :raises KafkaException: For other produce failures. """ # get schemas from kwargs if defined key_schema = kwargs.pop('key_schema', self._key_schema) value_schema = kwargs.pop('value_schema', self._value_schema) topic = kwargs.pop('topic', None) if not topic: raise ClientError("Topic name not specified.") value = kwargs.pop('value', None) key = kwargs.pop('key', None) if value is not None: if value_schema: value = self._serializer.encode_record_with_schema(topic, value_schema, value) else: raise ValueSerializerError("Avro schema required for values") if key is not None: if key_schema: key = self._serializer.encode_record_with_schema(topic, key_schema, key, True) else: raise KeySerializerError("Avro schema required for key") super(AvroProducer, self).produce(topic, value, key, **kwargs)
python
def produce(self, **kwargs): """ Asynchronously sends a message to Kafka by encoding with specified or default avro schema. :param str topic: topic name :param object value: An object to serialize :param str value_schema: Avro schema for value :param object key: An object to serialize :param str key_schema: Avro schema for key Plus any other parameters accepted by confluent_kafka.Producer.produce :raises SerializerError: On serialization failure :raises BufferError: If producer queue is full. :raises KafkaException: For other produce failures. """ # get schemas from kwargs if defined key_schema = kwargs.pop('key_schema', self._key_schema) value_schema = kwargs.pop('value_schema', self._value_schema) topic = kwargs.pop('topic', None) if not topic: raise ClientError("Topic name not specified.") value = kwargs.pop('value', None) key = kwargs.pop('key', None) if value is not None: if value_schema: value = self._serializer.encode_record_with_schema(topic, value_schema, value) else: raise ValueSerializerError("Avro schema required for values") if key is not None: if key_schema: key = self._serializer.encode_record_with_schema(topic, key_schema, key, True) else: raise KeySerializerError("Avro schema required for key") super(AvroProducer, self).produce(topic, value, key, **kwargs)
[ "def", "produce", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# get schemas from kwargs if defined", "key_schema", "=", "kwargs", ".", "pop", "(", "'key_schema'", ",", "self", ".", "_key_schema", ")", "value_schema", "=", "kwargs", ".", "pop", "(", "'v...
Asynchronously sends a message to Kafka by encoding with specified or default avro schema. :param str topic: topic name :param object value: An object to serialize :param str value_schema: Avro schema for value :param object key: An object to serialize :param str key_schema: Avro schema for key Plus any other parameters accepted by confluent_kafka.Producer.produce :raises SerializerError: On serialization failure :raises BufferError: If producer queue is full. :raises KafkaException: For other produce failures.
[ "Asynchronously", "sends", "message", "to", "Kafka", "by", "encoding", "with", "specified", "or", "default", "avro", "schema", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/__init__.py#L53-L90
train
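A hedged end-to-end sketch of ``produce()`` with a default value schema; broker and registry addresses, topic, and field values are placeholders.

from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer

value_schema = avro.loads(
    '{"type": "record", "name": "User", "fields": [{"name": "name", "type": "string"}]}'
)

producer = AvroProducer({
    'bootstrap.servers': 'localhost:9092',           # placeholder
    'schema.registry.url': 'http://localhost:8081',  # placeholder
}, default_value_schema=value_schema)

producer.produce(topic='users', value={'name': 'alice'})
producer.flush()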
confluentinc/confluent-kafka-python
confluent_kafka/avro/__init__.py
AvroConsumer.poll
def poll(self, timeout=None): """ This is an overridden method from confluent_kafka.Consumer class. This handles message deserialization using avro schema :param float timeout: Poll timeout in seconds (default: indefinite) :returns: message object with deserialized key and value as dict objects :rtype: Message """ if timeout is None: timeout = -1 message = super(AvroConsumer, self).poll(timeout) if message is None: return None if not message.error(): try: if message.value() is not None: decoded_value = self._serializer.decode_message(message.value(), is_key=False) message.set_value(decoded_value) if message.key() is not None: decoded_key = self._serializer.decode_message(message.key(), is_key=True) message.set_key(decoded_key) except SerializerError as e: raise SerializerError("Message deserialization failed for message at {} [{}] offset {}: {}".format( message.topic(), message.partition(), message.offset(), e)) return message
python
def poll(self, timeout=None): """ This is an overridden method from confluent_kafka.Consumer class. This handles message deserialization using avro schema :param float timeout: Poll timeout in seconds (default: indefinite) :returns: message object with deserialized key and value as dict objects :rtype: Message """ if timeout is None: timeout = -1 message = super(AvroConsumer, self).poll(timeout) if message is None: return None if not message.error(): try: if message.value() is not None: decoded_value = self._serializer.decode_message(message.value(), is_key=False) message.set_value(decoded_value) if message.key() is not None: decoded_key = self._serializer.decode_message(message.key(), is_key=True) message.set_key(decoded_key) except SerializerError as e: raise SerializerError("Message deserialization failed for message at {} [{}] offset {}: {}".format( message.topic(), message.partition(), message.offset(), e)) return message
[ "def", "poll", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "timeout", "=", "-", "1", "message", "=", "super", "(", "AvroConsumer", ",", "self", ")", ".", "poll", "(", "timeout", ")", "if", "message", "is"...
This is an overridden method from confluent_kafka.Consumer class. This handles message deserialization using avro schema :param float timeout: Poll timeout in seconds (default: indefinite) :returns: message object with deserialized key and value as dict objects :rtype: Message
[ "This", "is", "an", "overriden", "method", "from", "confluent_kafka", ".", "Consumer", "class", ".", "This", "handles", "message", "deserialization", "using", "avro", "schema" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/__init__.py#L128-L157
train
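A matching consumer-side sketch; addresses, group id, and topic are placeholders, and ``SerializerError`` is raised by ``poll()`` exactly as documented above.

from confluent_kafka.avro import AvroConsumer
from confluent_kafka.avro.serializer import SerializerError

consumer = AvroConsumer({
    'bootstrap.servers': 'localhost:9092',           # placeholder
    'group.id': 'example-group',                     # placeholder
    'schema.registry.url': 'http://localhost:8081',  # placeholder
})
consumer.subscribe(['users'])

try:
    while True:
        try:
            msg = consumer.poll(1.0)
        except SerializerError as e:
            print("Message deserialization failed: {}".format(e))
            break
        if msg is None:
            continue
        if msg.error():
            print(msg.error())
            continue
        print(msg.value())  # a dict, already decoded with the writer schema
finally:
    consumer.close()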
confluentinc/confluent-kafka-python
confluent_kafka/avro/serializer/message_serializer.py
MessageSerializer.encode_record_with_schema
def encode_record_with_schema(self, topic, schema, record, is_key=False): """ Given a parsed avro schema, encode a record for the given topic. The record is expected to be a dictionary. The schema is registered with the subject of 'topic-value' :param str topic: Topic name :param schema schema: Avro Schema :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes """ serialize_err = KeySerializerError if is_key else ValueSerializerError subject_suffix = ('-key' if is_key else '-value') # get the latest schema for the subject subject = topic + subject_suffix # register it schema_id = self.registry_client.register(subject, schema) if not schema_id: message = "Unable to retrieve schema id for subject %s" % (subject) raise serialize_err(message) # cache writer self.id_to_writers[schema_id] = self._get_encoder_func(schema) return self.encode_record_with_schema_id(schema_id, record, is_key=is_key)
python
def encode_record_with_schema(self, topic, schema, record, is_key=False): """ Given a parsed avro schema, encode a record for the given topic. The record is expected to be a dictionary. The schema is registered with the subject of 'topic-value' :param str topic: Topic name :param schema schema: Avro Schema :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes """ serialize_err = KeySerializerError if is_key else ValueSerializerError subject_suffix = ('-key' if is_key else '-value') # get the latest schema for the subject subject = topic + subject_suffix # register it schema_id = self.registry_client.register(subject, schema) if not schema_id: message = "Unable to retrieve schema id for subject %s" % (subject) raise serialize_err(message) # cache writer self.id_to_writers[schema_id] = self._get_encoder_func(schema) return self.encode_record_with_schema_id(schema_id, record, is_key=is_key)
[ "def", "encode_record_with_schema", "(", "self", ",", "topic", ",", "schema", ",", "record", ",", "is_key", "=", "False", ")", ":", "serialize_err", "=", "KeySerializerError", "if", "is_key", "else", "ValueSerializerError", "subject_suffix", "=", "(", "'-key'", ...
Given a parsed avro schema, encode a record for the given topic. The record is expected to be a dictionary. The schema is registered with the subject of 'topic-value' :param str topic: Topic name :param schema schema: Avro Schema :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes
[ "Given", "a", "parsed", "avro", "schema", "encode", "a", "record", "for", "the", "given", "topic", ".", "The", "record", "is", "expected", "to", "be", "a", "dictionary", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/serializer/message_serializer.py#L86-L113
train
confluentinc/confluent-kafka-python
confluent_kafka/avro/serializer/message_serializer.py
MessageSerializer.encode_record_with_schema_id
def encode_record_with_schema_id(self, schema_id, record, is_key=False): """ Encode a record with a given schema id. The record must be a python dictionary. :param int schema_id: integer ID :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes """ serialize_err = KeySerializerError if is_key else ValueSerializerError # use slow avro if schema_id not in self.id_to_writers: # get the writer + schema try: schema = self.registry_client.get_by_id(schema_id) if not schema: raise serialize_err("Schema does not exist") self.id_to_writers[schema_id] = self._get_encoder_func(schema) except ClientError: exc_type, exc_value, exc_traceback = sys.exc_info() raise serialize_err(repr(traceback.format_exception(exc_type, exc_value, exc_traceback))) # get the writer writer = self.id_to_writers[schema_id] with ContextStringIO() as outf: # Write the magic byte and schema ID in network byte order (big endian) outf.write(struct.pack('>bI', MAGIC_BYTE, schema_id)) # write the record to the rest of the buffer writer(record, outf) return outf.getvalue()
python
def encode_record_with_schema_id(self, schema_id, record, is_key=False): """ Encode a record with a given schema id. The record must be a python dictionary. :param int schema_id: integer ID :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes """ serialize_err = KeySerializerError if is_key else ValueSerializerError # use slow avro if schema_id not in self.id_to_writers: # get the writer + schema try: schema = self.registry_client.get_by_id(schema_id) if not schema: raise serialize_err("Schema does not exist") self.id_to_writers[schema_id] = self._get_encoder_func(schema) except ClientError: exc_type, exc_value, exc_traceback = sys.exc_info() raise serialize_err(repr(traceback.format_exception(exc_type, exc_value, exc_traceback))) # get the writer writer = self.id_to_writers[schema_id] with ContextStringIO() as outf: # Write the magic byte and schema ID in network byte order (big endian) outf.write(struct.pack('>bI', MAGIC_BYTE, schema_id)) # write the record to the rest of the buffer writer(record, outf) return outf.getvalue()
[ "def", "encode_record_with_schema_id", "(", "self", ",", "schema_id", ",", "record", ",", "is_key", "=", "False", ")", ":", "serialize_err", "=", "KeySerializerError", "if", "is_key", "else", "ValueSerializerError", "# use slow avro", "if", "schema_id", "not", "in",...
Encode a record with a given schema id. The record must be a python dictionary. :param int schema_id: integer ID :param dict record: An object to serialize :param bool is_key: If the record is a key :returns: Encoded record with schema ID as bytes :rtype: bytes
[ "Encode", "a", "record", "with", "a", "given", "schema", "id", ".", "The", "record", "must", "be", "a", "python", "dictionary", ".", ":", "param", "int", "schema_id", ":", "integer", "ID", ":", "param", "dict", "record", ":", "An", "object", "to", "ser...
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/serializer/message_serializer.py#L115-L149
train
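The five-byte header written above (one magic byte plus a big-endian 32-bit schema ID, followed by the Avro body) can be framed and unframed with plain ``struct``; the schema ID 42 and the body bytes below are stand-ins.

import struct

MAGIC_BYTE = 0

header = struct.pack('>bI', MAGIC_BYTE, 42)    # 1 + 4 bytes
payload = header + b'...avro-encoded-body...'  # placeholder body

magic, schema_id = struct.unpack('>bI', payload[:5])
assert magic == MAGIC_BYTE and schema_id == 42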
confluentinc/confluent-kafka-python
confluent_kafka/avro/serializer/message_serializer.py
MessageSerializer.decode_message
def decode_message(self, message, is_key=False): """ Decode a message from kafka that has been encoded for use with the schema registry. :param str|bytes or None message: message key or value to be decoded :returns: Decoded message contents. :rtype: dict """ if message is None: return None if len(message) <= 5: raise SerializerError("message is too small to decode") with ContextStringIO(message) as payload: magic, schema_id = struct.unpack('>bI', payload.read(5)) if magic != MAGIC_BYTE: raise SerializerError("message does not start with magic byte") decoder_func = self._get_decoder_func(schema_id, payload, is_key) return decoder_func(payload)
python
def decode_message(self, message, is_key=False): """ Decode a message from kafka that has been encoded for use with the schema registry. :param str|bytes or None message: message key or value to be decoded :returns: Decoded message contents. :rtype: dict """ if message is None: return None if len(message) <= 5: raise SerializerError("message is too small to decode") with ContextStringIO(message) as payload: magic, schema_id = struct.unpack('>bI', payload.read(5)) if magic != MAGIC_BYTE: raise SerializerError("message does not start with magic byte") decoder_func = self._get_decoder_func(schema_id, payload, is_key) return decoder_func(payload)
[ "def", "decode_message", "(", "self", ",", "message", ",", "is_key", "=", "False", ")", ":", "if", "message", "is", "None", ":", "return", "None", "if", "len", "(", "message", ")", "<=", "5", ":", "raise", "SerializerError", "(", "\"message is too small to...
Decode a message from kafka that has been encoded for use with the schema registry. :param str|bytes or None message: message key or value to be decoded :returns: Decoded message contents. :rtype: dict
[ "Decode", "a", "message", "from", "kafka", "that", "has", "been", "encoded", "for", "use", "with", "the", "schema", "registry", ".", ":", "param", "str|bytes", "or", "None", "message", ":", "message", "key", "or", "value", "to", "be", "decoded", ":", "re...
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/serializer/message_serializer.py#L207-L227
train
confluentinc/confluent-kafka-python
examples/confluent_cloud.py
acked
def acked(err, msg): """Delivery report callback called (from flush()) on successful or failed delivery of the message.""" if err is not None: print("failed to deliver message: {}".format(err.str())) else: print("produced to: {} [{}] @ {}".format(msg.topic(), msg.partition(), msg.offset()))
python
def acked(err, msg): """Delivery report callback called (from flush()) on successful or failed delivery of the message.""" if err is not None: print("failed to deliver message: {}".format(err.str())) else: print("produced to: {} [{}] @ {}".format(msg.topic(), msg.partition(), msg.offset()))
[ "def", "acked", "(", "err", ",", "msg", ")", ":", "if", "err", "is", "not", "None", ":", "print", "(", "\"failed to deliver message: {}\"", ".", "format", "(", "err", ".", "str", "(", ")", ")", ")", "else", ":", "print", "(", "\"produced to: {} [{}] @ {}...
Delivery report callback called (from flush()) on successful or failed delivery of the message.
[ "Delivery", "report", "callback", "called", "(", "from", "flush", "()", ")", "on", "successful", "or", "failed", "delivery", "of", "the", "message", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/confluent_cloud.py#L65-L70
train
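Wiring a delivery callback of this shape into a producer; the broker address and topic are placeholders, and the callback fires from ``flush()``/``poll()``, not from ``produce()`` itself.

from confluent_kafka import Producer

def acked(err, msg):
    # Same shape as the delivery report callback shown above.
    if err is not None:
        print("failed to deliver message: {}".format(err))
    else:
        print("produced to: {} [{}] @ {}".format(msg.topic(), msg.partition(), msg.offset()))

p = Producer({'bootstrap.servers': 'localhost:9092'})  # placeholder address
p.produce('my_topic', value='hello', callback=acked)
p.flush()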
confluentinc/confluent-kafka-python
examples/adminapi.py
example_create_topics
def example_create_topics(a, topics): """ Create topics """ new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in topics] # Call create_topics to asynchronously create topics, a dict # of <topic,future> is returned. fs = a.create_topics(new_topics) # Wait for operation to finish. # Timeouts are preferably controlled by passing request_timeout=15.0 # to the create_topics() call. # All futures will finish at the same time. for topic, f in fs.items(): try: f.result() # The result itself is None print("Topic {} created".format(topic)) except Exception as e: print("Failed to create topic {}: {}".format(topic, e))
python
def example_create_topics(a, topics): """ Create topics """ new_topics = [NewTopic(topic, num_partitions=3, replication_factor=1) for topic in topics] # Call create_topics to asynchronously create topics, a dict # of <topic,future> is returned. fs = a.create_topics(new_topics) # Wait for operation to finish. # Timeouts are preferably controlled by passing request_timeout=15.0 # to the create_topics() call. # All futures will finish at the same time. for topic, f in fs.items(): try: f.result() # The result itself is None print("Topic {} created".format(topic)) except Exception as e: print("Failed to create topic {}: {}".format(topic, e))
[ "def", "example_create_topics", "(", "a", ",", "topics", ")", ":", "new_topics", "=", "[", "NewTopic", "(", "topic", ",", "num_partitions", "=", "3", ",", "replication_factor", "=", "1", ")", "for", "topic", "in", "topics", "]", "# Call create_topics to asynch...
Create topics
[ "Create", "topics" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L31-L48
train
confluentinc/confluent-kafka-python
examples/adminapi.py
example_delete_topics
def example_delete_topics(a, topics): """ delete topics """ # Call delete_topics to asynchronously delete topics; a dict of <topic,future> is returned. # By default this operation on the broker returns immediately while # topics are deleted in the background. But here we give it some time (30s) # to propagate in the cluster before returning. fs = a.delete_topics(topics, operation_timeout=30) # Wait for operation to finish. for topic, f in fs.items(): try: f.result() # The result itself is None print("Topic {} deleted".format(topic)) except Exception as e: print("Failed to delete topic {}: {}".format(topic, e))
python
def example_delete_topics(a, topics): """ delete topics """ # Call delete_topics to asynchronously delete topics; a dict of <topic,future> is returned. # By default this operation on the broker returns immediately while # topics are deleted in the background. But here we give it some time (30s) # to propagate in the cluster before returning. fs = a.delete_topics(topics, operation_timeout=30) # Wait for operation to finish. for topic, f in fs.items(): try: f.result() # The result itself is None print("Topic {} deleted".format(topic)) except Exception as e: print("Failed to delete topic {}: {}".format(topic, e))
[ "def", "example_delete_topics", "(", "a", ",", "topics", ")", ":", "# Call delete_topics to asynchronously delete topics, a future is returned.", "# By default this operation on the broker returns immediately while", "# topics are deleted in the background. But here we give it some time (30s)", ...
delete topics
[ "delete", "topics" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L51-L68
train
confluentinc/confluent-kafka-python
examples/adminapi.py
example_create_partitions
def example_create_partitions(a, topics): """ create partitions """ new_parts = [NewPartitions(topic, int(new_total_count)) for topic, new_total_count in zip(topics[0::2], topics[1::2])] # Try switching validate_only to True to only validate the operation # on the broker but not actually perform it. fs = a.create_partitions(new_parts, validate_only=False) # Wait for operation to finish. for topic, f in fs.items(): try: f.result() # The result itself is None print("Additional partitions created for topic {}".format(topic)) except Exception as e: print("Failed to add partitions to topic {}: {}".format(topic, e))
python
def example_create_partitions(a, topics): """ create partitions """ new_parts = [NewPartitions(topic, int(new_total_count)) for topic, new_total_count in zip(topics[0::2], topics[1::2])] # Try switching validate_only to True to only validate the operation # on the broker but not actually perform it. fs = a.create_partitions(new_parts, validate_only=False) # Wait for operation to finish. for topic, f in fs.items(): try: f.result() # The result itself is None print("Additional partitions created for topic {}".format(topic)) except Exception as e: print("Failed to add partitions to topic {}: {}".format(topic, e))
[ "def", "example_create_partitions", "(", "a", ",", "topics", ")", ":", "new_parts", "=", "[", "NewPartitions", "(", "topic", ",", "int", "(", "new_total_count", ")", ")", "for", "topic", ",", "new_total_count", "in", "zip", "(", "topics", "[", "0", ":", ...
create partitions
[ "create", "partitions" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L71-L87
train
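The same driving pattern for create_partitions; the argument list alternates <topic> <new_total_count> to match the zip() striding above (all values here are placeholders):

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # assumed broker address
example_create_partitions(a, ['topic1', '6'])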
confluentinc/confluent-kafka-python
examples/adminapi.py
example_describe_configs
def example_describe_configs(a, args):
    """ describe configs """

    resources = [ConfigResource(restype, resname) for
                 restype, resname in zip(args[0::2], args[1::2])]

    fs = a.describe_configs(resources)

    # Wait for operation to finish.
    for res, f in fs.items():
        try:
            configs = f.result()
            for config in iter(configs.values()):
                print_config(config, 1)
        except KafkaException as e:
            print("Failed to describe {}: {}".format(res, e))
        except Exception:
            raise
python
def example_describe_configs(a, args):
    """ describe configs """

    resources = [ConfigResource(restype, resname) for
                 restype, resname in zip(args[0::2], args[1::2])]

    fs = a.describe_configs(resources)

    # Wait for operation to finish.
    for res, f in fs.items():
        try:
            configs = f.result()
            for config in iter(configs.values()):
                print_config(config, 1)
        except KafkaException as e:
            print("Failed to describe {}: {}".format(res, e))
        except Exception:
            raise
[ "def", "example_describe_configs", "(", "a", ",", "args", ")", ":", "resources", "=", "[", "ConfigResource", "(", "restype", ",", "resname", ")", "for", "restype", ",", "resname", "in", "zip", "(", "args", "[", "0", ":", ":", "2", "]", ",", "args", "...
describe configs
[ "describe", "configs" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L99-L117
train
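A driving sketch for describe_configs; arguments alternate <resource-type> <resource-name>, and the string resource types mirror the CLI arguments this example script was written for (broker address and names are placeholders):

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # assumed broker address
example_describe_configs(a, ['topic', 'topic1', 'broker', '1'])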
confluentinc/confluent-kafka-python
examples/adminapi.py
example_alter_configs
def example_alter_configs(a, args):
    """ Alter configs atomically, replacing non-specified
    configuration properties with their default values.
    """

    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    fs = a.alter_configs(resources)

    # Wait for operation to finish.
    for res, f in fs.items():
        try:
            f.result()  # empty, but raises exception on failure
            print("{} configuration successfully altered".format(res))
        except Exception:
            raise
python
def example_alter_configs(a, args):
    """ Alter configs atomically, replacing non-specified
    configuration properties with their default values.
    """

    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    fs = a.alter_configs(resources)

    # Wait for operation to finish.
    for res, f in fs.items():
        try:
            f.result()  # empty, but raises exception on failure
            print("{} configuration successfully altered".format(res))
        except Exception:
            raise
[ "def", "example_alter_configs", "(", "a", ",", "args", ")", ":", "resources", "=", "[", "]", "for", "restype", ",", "resname", ",", "configs", "in", "zip", "(", "args", "[", "0", ":", ":", "3", "]", ",", "args", "[", "1", ":", ":", "3", "]", ",...
Alter configs atomically, replacing non-specified configuration properties with their default values.
[ "Alter", "configs", "atomically", "replacing", "non", "-", "specified", "configuration", "properties", "with", "their", "default", "values", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L120-L140
train
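A driving sketch for alter_configs; arguments come in <resource-type> <resource-name> <key=val,key=val..> triplets to match the zip() striding above (all values are placeholder assumptions):

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # assumed broker address
example_alter_configs(a, ['topic', 'topic1',
                          'retention.ms=86400000,cleanup.policy=delete'])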
confluentinc/confluent-kafka-python
examples/adminapi.py
example_delta_alter_configs
def example_delta_alter_configs(a, args):
    """
    The AlterConfigs Kafka API requires all configuration to be passed,
    any left out configuration properties will revert to their default settings.

    This example shows how to just modify the supplied configuration entries
    by first reading the configuration from the broker, updating the supplied
    configuration with the broker configuration (without overwriting), and
    then writing it all back.

    The async nature of futures is also show-cased, which makes this example
    a bit more complex than it needs to be in the synchronous case.
    """

    # Convert supplied config to resources.
    # We can reuse the same resources both for describe_configs and
    # alter_configs.
    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    # Set up a locked counter and an Event (for signaling) to track when the
    # second level of futures are done. This is a bit of contrived example
    # due to no other asynchronous mechanism being used, so we'll need
    # to wait on something to signal completion.
    class WaitZero(object):
        def __init__(self, waitcnt):
            self.cnt = waitcnt
            self.lock = threading.Lock()
            self.event = threading.Event()

        def decr(self):
            """ Decrement cnt by 1"""
            with self.lock:
                assert self.cnt > 0
                self.cnt -= 1
            self.event.set()

        def wait(self):
            """ Wait until cnt reaches 0 """
            self.lock.acquire()
            while self.cnt > 0:
                self.lock.release()
                self.event.wait()
                self.event.clear()
                self.lock.acquire()
            self.lock.release()

        def __len__(self):
            with self.lock:
                return self.cnt

    wait_zero = WaitZero(len(resources))

    # Read existing configuration from cluster
    fs = a.describe_configs(resources)

    def delta_alter_configs_done(fut, resource):
        e = fut.exception()
        if e is not None:
            print("Config update for {} failed: {}".format(resource, e))
        else:
            print("Config for {} updated".format(resource))
        wait_zero.decr()

    def delta_alter_configs(resource, remote_config):
        print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
            len(resource), resource, len(remote_config)))
        # Only set configuration that is not default
        for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
            resource.set_config(k, entry.value, overwrite=False)

        fs = a.alter_configs([resource])
        fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))

    # For each resource's future set up a completion callback
    # that in turn calls alter_configs() on that single resource.
    # This is ineffective since the resources can usually go in
    # one single alter_configs() call, but we're also show-casing
    # the futures here.
    for res, f in fs.items():
        f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))

    # Wait for done callbacks to be triggered and operations to complete.
    print("Waiting for {} resource updates to finish".format(len(wait_zero)))
    wait_zero.wait()
python
def example_delta_alter_configs(a, args):
    """
    The AlterConfigs Kafka API requires all configuration to be passed,
    any left out configuration properties will revert to their default settings.

    This example shows how to just modify the supplied configuration entries
    by first reading the configuration from the broker, updating the supplied
    configuration with the broker configuration (without overwriting), and
    then writing it all back.

    The async nature of futures is also show-cased, which makes this example
    a bit more complex than it needs to be in the synchronous case.
    """

    # Convert supplied config to resources.
    # We can reuse the same resources both for describe_configs and
    # alter_configs.
    resources = []
    for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
        resource = ConfigResource(restype, resname)
        resources.append(resource)
        for k, v in [conf.split('=') for conf in configs.split(',')]:
            resource.set_config(k, v)

    # Set up a locked counter and an Event (for signaling) to track when the
    # second level of futures are done. This is a bit of contrived example
    # due to no other asynchronous mechanism being used, so we'll need
    # to wait on something to signal completion.
    class WaitZero(object):
        def __init__(self, waitcnt):
            self.cnt = waitcnt
            self.lock = threading.Lock()
            self.event = threading.Event()

        def decr(self):
            """ Decrement cnt by 1"""
            with self.lock:
                assert self.cnt > 0
                self.cnt -= 1
            self.event.set()

        def wait(self):
            """ Wait until cnt reaches 0 """
            self.lock.acquire()
            while self.cnt > 0:
                self.lock.release()
                self.event.wait()
                self.event.clear()
                self.lock.acquire()
            self.lock.release()

        def __len__(self):
            with self.lock:
                return self.cnt

    wait_zero = WaitZero(len(resources))

    # Read existing configuration from cluster
    fs = a.describe_configs(resources)

    def delta_alter_configs_done(fut, resource):
        e = fut.exception()
        if e is not None:
            print("Config update for {} failed: {}".format(resource, e))
        else:
            print("Config for {} updated".format(resource))
        wait_zero.decr()

    def delta_alter_configs(resource, remote_config):
        print("Updating {} supplied config entries {} with {} config entries read from cluster".format(
            len(resource), resource, len(remote_config)))
        # Only set configuration that is not default
        for k, entry in [(k, v) for k, v in remote_config.items() if not v.is_default]:
            resource.set_config(k, entry.value, overwrite=False)

        fs = a.alter_configs([resource])
        fs[resource].add_done_callback(lambda fut: delta_alter_configs_done(fut, resource))

    # For each resource's future set up a completion callback
    # that in turn calls alter_configs() on that single resource.
    # This is ineffective since the resources can usually go in
    # one single alter_configs() call, but we're also show-casing
    # the futures here.
    for res, f in fs.items():
        f.add_done_callback(lambda fut, resource=res: delta_alter_configs(resource, fut.result()))

    # Wait for done callbacks to be triggered and operations to complete.
    print("Waiting for {} resource updates to finish".format(len(wait_zero)))
    wait_zero.wait()
[ "def", "example_delta_alter_configs", "(", "a", ",", "args", ")", ":", "# Convert supplied config to resources.", "# We can reuse the same resources both for describe_configs and", "# alter_configs.", "resources", "=", "[", "]", "for", "restype", ",", "resname", ",", "configs...
The AlterConfigs Kafka API requires all configuration to be passed,
any left out configuration properties will revert to their default settings.

This example shows how to just modify the supplied configuration entries
by first reading the configuration from the broker, updating the supplied
configuration with the broker configuration (without overwriting), and
then writing it all back.

The async nature of futures is also show-cased, which makes this example
a bit more complex than it needs to be in the synchronous case.
[ "The", "AlterConfigs", "Kafka", "API", "requires", "all", "configuration", "to", "be", "passed", "any", "left", "out", "configuration", "properties", "will", "revert", "to", "their", "default", "settings", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L143-L232
train
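The two-level future chaining above relies on the standard concurrent.futures callback protocol; a minimal stand-alone illustration of add_done_callback (pure Python, no broker needed):

import concurrent.futures

def on_done(fut):
    # Fires when the future completes, like the delta_alter_configs callbacks above.
    print('operation finished:', fut.result())

ex = concurrent.futures.ThreadPoolExecutor(max_workers=1)
f = ex.submit(lambda: 42)
f.add_done_callback(on_done)
ex.shutdown(wait=True)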
confluentinc/confluent-kafka-python
examples/adminapi.py
example_list
def example_list(a, args):
    """ list topics and cluster metadata """

    if len(args) == 0:
        what = "all"
    else:
        what = args[0]

    md = a.list_topics(timeout=10)

    print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))

    if what in ("all", "brokers"):
        print(" {} brokers:".format(len(md.brokers)))
        for b in iter(md.brokers.values()):
            if b.id == md.controller_id:
                print(" {} (controller)".format(b))
            else:
                print(" {}".format(b))

    if what not in ("all", "topics"):
        return

    print(" {} topics:".format(len(md.topics)))
    for t in iter(md.topics.values()):
        if t.error is not None:
            errstr = ": {}".format(t.error)
        else:
            errstr = ""

        print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))

        for p in iter(t.partitions.values()):
            if p.error is not None:
                errstr = ": {}".format(p.error)
            else:
                errstr = ""

            # Note the trailing {} for errstr, which the original left out of
            # the format string even though errstr was passed to format().
            print(" partition {} leader: {}, replicas: {}, isrs: {}{}".format(
                p.id, p.leader, p.replicas, p.isrs, errstr))
python
def example_list(a, args):
    """ list topics and cluster metadata """

    if len(args) == 0:
        what = "all"
    else:
        what = args[0]

    md = a.list_topics(timeout=10)

    print("Cluster {} metadata (response from broker {}):".format(md.cluster_id, md.orig_broker_name))

    if what in ("all", "brokers"):
        print(" {} brokers:".format(len(md.brokers)))
        for b in iter(md.brokers.values()):
            if b.id == md.controller_id:
                print(" {} (controller)".format(b))
            else:
                print(" {}".format(b))

    if what not in ("all", "topics"):
        return

    print(" {} topics:".format(len(md.topics)))
    for t in iter(md.topics.values()):
        if t.error is not None:
            errstr = ": {}".format(t.error)
        else:
            errstr = ""

        print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))

        for p in iter(t.partitions.values()):
            if p.error is not None:
                errstr = ": {}".format(p.error)
            else:
                errstr = ""

            # Note the trailing {} for errstr, which the original left out of
            # the format string even though errstr was passed to format().
            print(" partition {} leader: {}, replicas: {}, isrs: {}{}".format(
                p.id, p.leader, p.replicas, p.isrs, errstr))
[ "def", "example_list", "(", "a", ",", "args", ")", ":", "if", "len", "(", "args", ")", "==", "0", ":", "what", "=", "\"all\"", "else", ":", "what", "=", "args", "[", "0", "]", "md", "=", "a", ".", "list_topics", "(", "timeout", "=", "10", ")", ...
list topics and cluster metadata
[ "list", "topics", "and", "cluster", "metadata" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/adminapi.py#L235-L274
train
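A driving sketch for example_list; the broker address is a placeholder assumption:

from confluent_kafka.admin import AdminClient

a = AdminClient({'bootstrap.servers': 'localhost:9092'})  # assumed broker address
example_list(a, [])            # "all": brokers and topics
example_list(a, ['brokers'])   # brokers only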
confluentinc/confluent-kafka-python
confluent_kafka/__init__.py
_resolve_plugins
def _resolve_plugins(plugins):
    """ Resolve embedded plugins from the wheel's library directory.

        For internal module use only.

        :param str plugins: The plugin.library.paths value
    """
    import os
    from sys import platform

    # Location of __init__.py and the embedded library directory
    basedir = os.path.dirname(__file__)

    if platform in ('win32', 'cygwin'):
        paths_sep = ';'
        ext = '.dll'
        libdir = basedir
    elif platform in ('linux', 'linux2'):
        paths_sep = ':'
        ext = '.so'
        libdir = os.path.join(basedir, '.libs')
    elif platform == 'darwin':
        paths_sep = ':'
        ext = '.dylib'
        libdir = os.path.join(basedir, '.dylibs')
    else:
        # Unknown platform, there are probably no embedded plugins.
        return plugins

    if not os.path.isdir(libdir):
        # No embedded library directory, probably not a wheel installation.
        return plugins

    resolved = []
    for plugin in plugins.split(paths_sep):
        if '/' in plugin or '\\' in plugin:
            # Path specified, leave unchanged
            resolved.append(plugin)
            continue

        # See if the plugin can be found in the wheel's
        # embedded library directory.
        # The user might not have supplied a file extension, so try both.
        good = None
        for file in [plugin, plugin + ext]:
            fpath = os.path.join(libdir, file)
            if os.path.isfile(fpath):
                good = fpath
                break

        if good is not None:
            resolved.append(good)
        else:
            resolved.append(plugin)

    return paths_sep.join(resolved)
python
def _resolve_plugins(plugins):
    """ Resolve embedded plugins from the wheel's library directory.

        For internal module use only.

        :param str plugins: The plugin.library.paths value
    """
    import os
    from sys import platform

    # Location of __init__.py and the embedded library directory
    basedir = os.path.dirname(__file__)

    if platform in ('win32', 'cygwin'):
        paths_sep = ';'
        ext = '.dll'
        libdir = basedir
    elif platform in ('linux', 'linux2'):
        paths_sep = ':'
        ext = '.so'
        libdir = os.path.join(basedir, '.libs')
    elif platform == 'darwin':
        paths_sep = ':'
        ext = '.dylib'
        libdir = os.path.join(basedir, '.dylibs')
    else:
        # Unknown platform, there are probably no embedded plugins.
        return plugins

    if not os.path.isdir(libdir):
        # No embedded library directory, probably not a wheel installation.
        return plugins

    resolved = []
    for plugin in plugins.split(paths_sep):
        if '/' in plugin or '\\' in plugin:
            # Path specified, leave unchanged
            resolved.append(plugin)
            continue

        # See if the plugin can be found in the wheel's
        # embedded library directory.
        # The user might not have supplied a file extension, so try both.
        good = None
        for file in [plugin, plugin + ext]:
            fpath = os.path.join(libdir, file)
            if os.path.isfile(fpath):
                good = fpath
                break

        if good is not None:
            resolved.append(good)
        else:
            resolved.append(plugin)

    return paths_sep.join(resolved)
[ "def", "_resolve_plugins", "(", "plugins", ")", ":", "import", "os", "from", "sys", "import", "platform", "# Location of __init__.py and the embedded library directory", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "if", "platform", "in",...
Resolve embedded plugins from the wheel's library directory.

For internal module use only.

:param str plugins: The plugin.library.paths value
[ "Resolve", "embedded", "plugins", "from", "the", "wheel", "s", "library", "directory", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/__init__.py#L47-L102
train
confluentinc/confluent-kafka-python
examples/avro-cli.py
on_delivery
def on_delivery(err, msg, obj):
    """
        Handle delivery reports served from producer.poll.
        This callback takes an extra argument, obj.
        This allows the original contents to be included for debugging purposes.
    """
    if err is not None:
        print('Message {} delivery failed for user {} with error {}'.format(
            obj.id, obj.name, err))
    else:
        print('Message {} successfully produced to {} [{}] at offset {}'.format(
            obj.id, msg.topic(), msg.partition(), msg.offset()))
python
def on_delivery(err, msg, obj):
    """
        Handle delivery reports served from producer.poll.
        This callback takes an extra argument, obj.
        This allows the original contents to be included for debugging purposes.
    """
    if err is not None:
        print('Message {} delivery failed for user {} with error {}'.format(
            obj.id, obj.name, err))
    else:
        print('Message {} successfully produced to {} [{}] at offset {}'.format(
            obj.id, msg.topic(), msg.partition(), msg.offset()))
[ "def", "on_delivery", "(", "err", ",", "msg", ",", "obj", ")", ":", "if", "err", "is", "not", "None", ":", "print", "(", "'Message {} delivery failed for user {} with error {}'", ".", "format", "(", "obj", ".", "id", ",", "obj", ".", "name", ",", "err", ...
Handle delivery reports served from producer.poll. This callback takes an extra argument, obj. This allows the original contents to be included for debugging purposes.
[ "Handle", "delivery", "reports", "served", "from", "producer", ".", "poll", ".", "This", "callback", "takes", "an", "extra", "argument", "obj", ".", "This", "allows", "the", "original", "contents", "to", "be", "included", "for", "debugging", "purposes", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/avro-cli.py#L68-L79
train
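A wiring sketch for the callback above; the broker address and topic are placeholders, and _Record is a hypothetical stand-in for the script's Avro User object:

from confluent_kafka import Producer

class _Record(object):
    id, name = 1, 'alice'  # only the fields on_delivery reads

p = Producer({'bootstrap.servers': 'localhost:9092'})  # assumed broker address
rec = _Record()
p.produce('users', value=b'payload',
          callback=lambda err, msg, obj=rec: on_delivery(err, msg, obj))
p.flush()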
confluentinc/confluent-kafka-python
examples/avro-cli.py
produce
def produce(topic, conf):
    """
        Produce User records
    """
    from confluent_kafka.avro import AvroProducer

    producer = AvroProducer(conf, default_value_schema=record_schema)

    print("Producing user records to topic {}. ^c to exit.".format(topic))

    while True:
        # Instantiate new User, populate fields, produce record, execute callbacks.
        record = User()
        try:
            record.name = input("Enter name: ")
            record.favorite_number = int(input("Enter favorite number: "))
            record.favorite_color = input("Enter favorite color: ")

            # The message passed to the delivery callback will already be serialized.
            # To aid in debugging we provide the original object to the delivery callback.
            producer.produce(topic=topic, value=record.to_dict(),
                             callback=lambda err, msg, obj=record: on_delivery(err, msg, obj))
            # Serve on_delivery callbacks from previous asynchronous produce()
            producer.poll(0)
        except KeyboardInterrupt:
            break
        except ValueError:
            print("Invalid input, discarding record...")
            continue

    print("\nFlushing records...")
    producer.flush()
python
def produce(topic, conf):
    """
        Produce User records
    """
    from confluent_kafka.avro import AvroProducer

    producer = AvroProducer(conf, default_value_schema=record_schema)

    print("Producing user records to topic {}. ^c to exit.".format(topic))

    while True:
        # Instantiate new User, populate fields, produce record, execute callbacks.
        record = User()
        try:
            record.name = input("Enter name: ")
            record.favorite_number = int(input("Enter favorite number: "))
            record.favorite_color = input("Enter favorite color: ")

            # The message passed to the delivery callback will already be serialized.
            # To aid in debugging we provide the original object to the delivery callback.
            producer.produce(topic=topic, value=record.to_dict(),
                             callback=lambda err, msg, obj=record: on_delivery(err, msg, obj))
            # Serve on_delivery callbacks from previous asynchronous produce()
            producer.poll(0)
        except KeyboardInterrupt:
            break
        except ValueError:
            print("Invalid input, discarding record...")
            continue

    print("\nFlushing records...")
    producer.flush()
[ "def", "produce", "(", "topic", ",", "conf", ")", ":", "from", "confluent_kafka", ".", "avro", "import", "AvroProducer", "producer", "=", "AvroProducer", "(", "conf", ",", "default_value_schema", "=", "record_schema", ")", "print", "(", "\"Producing user records t...
Produce User records
[ "Produce", "User", "records" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/avro-cli.py#L82-L113
train
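A minimal configuration sketch for calling produce; both service addresses are placeholder assumptions:

conf = {'bootstrap.servers': 'localhost:9092',           # assumed broker address
        'schema.registry.url': 'http://localhost:8081'}  # assumed Schema Registry address
produce('users', conf)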
confluentinc/confluent-kafka-python
examples/avro-cli.py
consume
def consume(topic, conf):
    """
        Consume User records
    """
    from confluent_kafka.avro import AvroConsumer
    from confluent_kafka.avro.serializer import SerializerError

    print("Consuming user records from topic {} with group {}. ^c to exit.".format(topic, conf["group.id"]))

    c = AvroConsumer(conf, reader_value_schema=record_schema)
    c.subscribe([topic])

    while True:
        try:
            msg = c.poll(1)

            # There were no messages on the queue, continue polling
            if msg is None:
                continue

            if msg.error():
                print("Consumer error: {}".format(msg.error()))
                continue

            record = User(msg.value())
            print("name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n".format(
                record.name, record.favorite_number, record.favorite_color))
        except SerializerError as e:
            # Report malformed record, discard results, continue polling
            print("Message deserialization failed {}".format(e))
            continue
        except KeyboardInterrupt:
            break

    print("Shutting down consumer..")
    c.close()
python
def consume(topic, conf):
    """
        Consume User records
    """
    from confluent_kafka.avro import AvroConsumer
    from confluent_kafka.avro.serializer import SerializerError

    print("Consuming user records from topic {} with group {}. ^c to exit.".format(topic, conf["group.id"]))

    c = AvroConsumer(conf, reader_value_schema=record_schema)
    c.subscribe([topic])

    while True:
        try:
            msg = c.poll(1)

            # There were no messages on the queue, continue polling
            if msg is None:
                continue

            if msg.error():
                print("Consumer error: {}".format(msg.error()))
                continue

            record = User(msg.value())
            print("name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n".format(
                record.name, record.favorite_number, record.favorite_color))
        except SerializerError as e:
            # Report malformed record, discard results, continue polling
            print("Message deserialization failed {}".format(e))
            continue
        except KeyboardInterrupt:
            break

    print("Shutting down consumer..")
    c.close()
[ "def", "consume", "(", "topic", ",", "conf", ")", ":", "from", "confluent_kafka", ".", "avro", "import", "AvroConsumer", "from", "confluent_kafka", ".", "avro", ".", "serializer", "import", "SerializerError", "print", "(", "\"Consuming user records from topic {} with ...
Consume User records
[ "Consume", "User", "records" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/examples/avro-cli.py#L116-L151
train
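The matching consumer-side sketch; consume requires group.id in addition to the (assumed) service addresses:

conf = {'bootstrap.servers': 'localhost:9092',           # assumed broker address
        'schema.registry.url': 'http://localhost:8081',  # assumed Schema Registry address
        'group.id': 'avro-cli-consumers'}
consume('users', conf)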
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.register
def register(self, subject, avro_schema):
    """
    POST /subjects/(string: subject)/versions
    Register a schema with the registry under the given subject
    and receive a schema id.

    avro_schema must be a parsed schema from the python avro library

    Multiple instances of the same schema will result in cache misses.

    :param str subject: subject name
    :param schema avro_schema: Avro schema to be registered
    :returns: schema_id
    :rtype: int
    """
    schemas_to_id = self.subject_to_schema_ids[subject]
    schema_id = schemas_to_id.get(avro_schema, None)
    if schema_id is not None:
        return schema_id
    # send it up
    url = '/'.join([self.url, 'subjects', subject, 'versions'])
    # body is { schema : json_string }

    body = {'schema': json.dumps(avro_schema.to_json())}
    result, code = self._send_request(url, method='POST', body=body)
    if (code == 401 or code == 403):
        raise ClientError("Unauthorized access. Error code:" + str(code))
    elif code == 409:
        raise ClientError("Incompatible Avro schema:" + str(code))
    elif code == 422:
        raise ClientError("Invalid Avro schema:" + str(code))
    elif not (code >= 200 and code <= 299):
        raise ClientError("Unable to register schema. Error code:" + str(code))
    # result is a dict
    schema_id = result['id']
    # cache it
    self._cache_schema(avro_schema, schema_id, subject)
    return schema_id
python
def register(self, subject, avro_schema):
    """
    POST /subjects/(string: subject)/versions
    Register a schema with the registry under the given subject
    and receive a schema id.

    avro_schema must be a parsed schema from the python avro library

    Multiple instances of the same schema will result in cache misses.

    :param str subject: subject name
    :param schema avro_schema: Avro schema to be registered
    :returns: schema_id
    :rtype: int
    """
    schemas_to_id = self.subject_to_schema_ids[subject]
    schema_id = schemas_to_id.get(avro_schema, None)
    if schema_id is not None:
        return schema_id
    # send it up
    url = '/'.join([self.url, 'subjects', subject, 'versions'])
    # body is { schema : json_string }

    body = {'schema': json.dumps(avro_schema.to_json())}
    result, code = self._send_request(url, method='POST', body=body)
    if (code == 401 or code == 403):
        raise ClientError("Unauthorized access. Error code:" + str(code))
    elif code == 409:
        raise ClientError("Incompatible Avro schema:" + str(code))
    elif code == 422:
        raise ClientError("Invalid Avro schema:" + str(code))
    elif not (code >= 200 and code <= 299):
        raise ClientError("Unable to register schema. Error code:" + str(code))
    # result is a dict
    schema_id = result['id']
    # cache it
    self._cache_schema(avro_schema, schema_id, subject)
    return schema_id
[ "def", "register", "(", "self", ",", "subject", ",", "avro_schema", ")", ":", "schemas_to_id", "=", "self", ".", "subject_to_schema_ids", "[", "subject", "]", "schema_id", "=", "schemas_to_id", ".", "get", "(", "avro_schema", ",", "None", ")", "if", "schema_...
POST /subjects/(string: subject)/versions
Register a schema with the registry under the given subject
and receive a schema id.

avro_schema must be a parsed schema from the python avro library

Multiple instances of the same schema will result in cache misses.

:param str subject: subject name
:param schema avro_schema: Avro schema to be registered
:returns: schema_id
:rtype: int
[ "POST", "/", "subjects", "/", "(", "string", ":", "subject", ")", "/", "versions", "Register", "a", "schema", "with", "the", "registry", "under", "the", "given", "subject", "and", "receive", "a", "schema", "id", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L192-L230
train
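A hedged driving sketch for register; the registry address is a placeholder, and both the config-dict constructor and the load helper's module path are assumptions worth verifying against the installed version:

from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.load import loads  # assumed helper module path

client = CachedSchemaRegistryClient({'url': 'http://localhost:8081'})  # assumed registry address
schema = loads('{"type": "string"}')
schema_id = client.register('users-value', schema)
print(schema_id)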
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.delete_subject
def delete_subject(self, subject):
    """
    DELETE /subjects/(string: subject)
    Deletes the specified subject and its associated compatibility level if
    registered. It is recommended to use this API only when a topic needs
    to be recycled or in development environments.
    :param subject: subject name
    :returns: version of the schema deleted under this subject
    :rtype: (int)
    """
    url = '/'.join([self.url, 'subjects', subject])

    result, code = self._send_request(url, method="DELETE")
    if not (code >= 200 and code <= 299):
        raise ClientError('Unable to delete subject: {}'.format(result))
    return result
python
def delete_subject(self, subject):
    """
    DELETE /subjects/(string: subject)
    Deletes the specified subject and its associated compatibility level if
    registered. It is recommended to use this API only when a topic needs
    to be recycled or in development environments.
    :param subject: subject name
    :returns: version of the schema deleted under this subject
    :rtype: (int)
    """
    url = '/'.join([self.url, 'subjects', subject])

    result, code = self._send_request(url, method="DELETE")
    if not (code >= 200 and code <= 299):
        raise ClientError('Unable to delete subject: {}'.format(result))
    return result
[ "def", "delete_subject", "(", "self", ",", "subject", ")", ":", "url", "=", "'/'", ".", "join", "(", "[", "self", ".", "url", ",", "'subjects'", ",", "subject", "]", ")", "result", ",", "code", "=", "self", ".", "_send_request", "(", "url", ",", "m...
DELETE /subjects/(string: subject)
Deletes the specified subject and its associated compatibility level if
registered. It is recommended to use this API only when a topic needs
to be recycled or in development environments.

:param subject: subject name
:returns: version of the schema deleted under this subject
:rtype: (int)
[ "DELETE", "/", "subjects", "/", "(", "string", ":", "subject", ")", "Deletes", "the", "specified", "subject", "and", "its", "associated", "compatibility", "level", "if", "registered", ".", "It", "is", "recommended", "to", "use", "this", "API", "only", "when"...
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L232-L247
train
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.get_by_id
def get_by_id(self, schema_id):
    """
    GET /schemas/ids/{int: id}
    Retrieve a parsed avro schema by id or None if not found
    :param int schema_id: int value
    :returns: Avro schema
    :rtype: schema
    """
    if schema_id in self.id_to_schema:
        return self.id_to_schema[schema_id]
    # fetch from the registry
    url = '/'.join([self.url, 'schemas', 'ids', str(schema_id)])

    result, code = self._send_request(url)
    if code == 404:
        log.error("Schema not found:" + str(code))
        return None
    elif not (code >= 200 and code <= 299):
        log.error("Unable to get schema for the specific ID:" + str(code))
        return None
    else:
        # need to parse the schema
        schema_str = result.get("schema")
        try:
            result = loads(schema_str)
            # cache it
            self._cache_schema(result, schema_id)
            return result
        except ClientError as e:
            # bad schema - should not happen
            raise ClientError("Received bad schema (id %s) from registry: %s" % (schema_id, e))
python
def get_by_id(self, schema_id):
    """
    GET /schemas/ids/{int: id}
    Retrieve a parsed avro schema by id or None if not found
    :param int schema_id: int value
    :returns: Avro schema
    :rtype: schema
    """
    if schema_id in self.id_to_schema:
        return self.id_to_schema[schema_id]
    # fetch from the registry
    url = '/'.join([self.url, 'schemas', 'ids', str(schema_id)])

    result, code = self._send_request(url)
    if code == 404:
        log.error("Schema not found:" + str(code))
        return None
    elif not (code >= 200 and code <= 299):
        log.error("Unable to get schema for the specific ID:" + str(code))
        return None
    else:
        # need to parse the schema
        schema_str = result.get("schema")
        try:
            result = loads(schema_str)
            # cache it
            self._cache_schema(result, schema_id)
            return result
        except ClientError as e:
            # bad schema - should not happen
            raise ClientError("Received bad schema (id %s) from registry: %s" % (schema_id, e))
[ "def", "get_by_id", "(", "self", ",", "schema_id", ")", ":", "if", "schema_id", "in", "self", ".", "id_to_schema", ":", "return", "self", ".", "id_to_schema", "[", "schema_id", "]", "# fetch from the registry", "url", "=", "'/'", ".", "join", "(", "[", "se...
GET /schemas/ids/{int: id}
Retrieve a parsed avro schema by id or None if not found

:param int schema_id: int value
:returns: Avro schema
:rtype: schema
[ "GET", "/", "schemas", "/", "ids", "/", "{", "int", ":", "id", "}", "Retrieve", "a", "parsed", "avro", "schema", "by", "id", "or", "None", "if", "not", "found", ":", "param", "int", "schema_id", ":", "int", "value", ":", "returns", ":", "Avro", "sc...
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L249-L279
train
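Continuing the hypothetical client from the sketch after the register record; repeated lookups are served from the local id_to_schema cache:

schema = client.get_by_id(schema_id)  # schema_id from the earlier register() sketch
print(schema if schema is not None else 'unknown schema id')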
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.get_version
def get_version(self, subject, avro_schema):
    """
    POST /subjects/(string: subject)
    Get the version of a schema for a given subject.

    Returns None if not found.
    :param str subject: subject name
    :param schema avro_schema: Avro schema
    :returns: version
    :rtype: int
    """
    schemas_to_version = self.subject_to_schema_versions[subject]
    version = schemas_to_version.get(avro_schema, None)
    if version is not None:
        return version

    url = '/'.join([self.url, 'subjects', subject])
    body = {'schema': json.dumps(avro_schema.to_json())}

    result, code = self._send_request(url, method='POST', body=body)
    if code == 404:
        log.error("Not found:" + str(code))
        return None
    elif not (code >= 200 and code <= 299):
        log.error("Unable to get version of a schema:" + str(code))
        return None
    schema_id = result['id']
    version = result['version']
    self._cache_schema(avro_schema, schema_id, subject, version)
    return version
python
def get_version(self, subject, avro_schema):
    """
    POST /subjects/(string: subject)
    Get the version of a schema for a given subject.

    Returns None if not found.
    :param str subject: subject name
    :param schema avro_schema: Avro schema
    :returns: version
    :rtype: int
    """
    schemas_to_version = self.subject_to_schema_versions[subject]
    version = schemas_to_version.get(avro_schema, None)
    if version is not None:
        return version

    url = '/'.join([self.url, 'subjects', subject])
    body = {'schema': json.dumps(avro_schema.to_json())}

    result, code = self._send_request(url, method='POST', body=body)
    if code == 404:
        log.error("Not found:" + str(code))
        return None
    elif not (code >= 200 and code <= 299):
        log.error("Unable to get version of a schema:" + str(code))
        return None
    schema_id = result['id']
    version = result['version']
    self._cache_schema(avro_schema, schema_id, subject, version)
    return version
[ "def", "get_version", "(", "self", ",", "subject", ",", "avro_schema", ")", ":", "schemas_to_version", "=", "self", ".", "subject_to_schema_versions", "[", "subject", "]", "version", "=", "schemas_to_version", ".", "get", "(", "avro_schema", ",", "None", ")", ...
POST /subjects/(string: subject)
Get the version of a schema for a given subject.

Returns None if not found.

:param schema avro_schema: Avro schema
:param str subject: subject name
:returns: version
:rtype: int
[ "POST", "/", "subjects", "/", "(", "string", ":", "subject", ")" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L321-L351
train
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.update_compatibility
def update_compatibility(self, level, subject=None):
    """
    PUT /config/(string: subject)
    Update the compatibility level for a subject.  Level must be one of:
    :param str level: ex: 'NONE','FULL','FORWARD', or 'BACKWARD'
    """
    if level not in VALID_LEVELS:
        raise ClientError("Invalid level specified: %s" % (str(level)))

    url = '/'.join([self.url, 'config'])
    if subject:
        url += '/' + subject

    body = {"compatibility": level}
    result, code = self._send_request(url, method='PUT', body=body)
    if code >= 200 and code <= 299:
        return result['compatibility']
    else:
        # Both values belong inside the % tuple; the original closed the
        # tuple after str(level), leaving %d without an argument.
        raise ClientError("Unable to update level: %s. Error code: %d" % (str(level), code))
python
def update_compatibility(self, level, subject=None):
    """
    PUT /config/(string: subject)
    Update the compatibility level for a subject.  Level must be one of:
    :param str level: ex: 'NONE','FULL','FORWARD', or 'BACKWARD'
    """
    if level not in VALID_LEVELS:
        raise ClientError("Invalid level specified: %s" % (str(level)))

    url = '/'.join([self.url, 'config'])
    if subject:
        url += '/' + subject

    body = {"compatibility": level}
    result, code = self._send_request(url, method='PUT', body=body)
    if code >= 200 and code <= 299:
        return result['compatibility']
    else:
        # Both values belong inside the % tuple; the original closed the
        # tuple after str(level), leaving %d without an argument.
        raise ClientError("Unable to update level: %s. Error code: %d" % (str(level), code))
[ "def", "update_compatibility", "(", "self", ",", "level", ",", "subject", "=", "None", ")", ":", "if", "level", "not", "in", "VALID_LEVELS", ":", "raise", "ClientError", "(", "\"Invalid level specified: %s\"", "%", "(", "str", "(", "level", ")", ")", ")", ...
PUT /config/(string: subject)
Update the compatibility level for a subject.  Level must be one of:

:param str level: ex: 'NONE','FULL','FORWARD', or 'BACKWARD'
[ "PUT", "/", "config", "/", "(", "string", ":", "subject", ")" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L385-L405
train
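Continuing the same hypothetical client; omitting subject sets the global default level:

print(client.update_compatibility('BACKWARD', subject='users-value'))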
confluentinc/confluent-kafka-python
confluent_kafka/avro/cached_schema_registry_client.py
CachedSchemaRegistryClient.get_compatibility
def get_compatibility(self, subject=None):
    """
    GET /config
    Get the current compatibility level for a subject.  Result will be one of:
    :param str subject: subject name
    :raises ClientError: if the request was unsuccessful or an invalid
    compatibility level was returned
    :returns: one of 'NONE','FULL','FORWARD', or 'BACKWARD'
    :rtype: str
    """
    url = '/'.join([self.url, 'config'])
    if subject:
        url = '/'.join([url, subject])

    result, code = self._send_request(url)
    is_successful_request = code >= 200 and code <= 299
    if not is_successful_request:
        raise ClientError('Unable to fetch compatibility level. Error code: %d' % code)

    compatibility = result.get('compatibilityLevel', None)
    if compatibility not in VALID_LEVELS:
        if compatibility is None:
            error_msg_suffix = 'No compatibility was returned'
        else:
            error_msg_suffix = str(compatibility)
        raise ClientError('Invalid compatibility level received: %s' % error_msg_suffix)

    return compatibility
python
def get_compatibility(self, subject=None):
    """
    GET /config
    Get the current compatibility level for a subject.  Result will be one of:
    :param str subject: subject name
    :raises ClientError: if the request was unsuccessful or an invalid
    compatibility level was returned
    :returns: one of 'NONE','FULL','FORWARD', or 'BACKWARD'
    :rtype: str
    """
    url = '/'.join([self.url, 'config'])
    if subject:
        url = '/'.join([url, subject])

    result, code = self._send_request(url)
    is_successful_request = code >= 200 and code <= 299
    if not is_successful_request:
        raise ClientError('Unable to fetch compatibility level. Error code: %d' % code)

    compatibility = result.get('compatibilityLevel', None)
    if compatibility not in VALID_LEVELS:
        if compatibility is None:
            error_msg_suffix = 'No compatibility was returned'
        else:
            error_msg_suffix = str(compatibility)
        raise ClientError('Invalid compatibility level received: %s' % error_msg_suffix)

    return compatibility
[ "def", "get_compatibility", "(", "self", ",", "subject", "=", "None", ")", ":", "url", "=", "'/'", ".", "join", "(", "[", "self", ".", "url", ",", "'config'", "]", ")", "if", "subject", ":", "url", "=", "'/'", ".", "join", "(", "[", "url", ",", ...
GET /config
Get the current compatibility level for a subject.  Result will be one of:

:param str subject: subject name
:raises ClientError: if the request was unsuccessful or an invalid
compatibility level was returned
:returns: one of 'NONE','FULL','FORWARD', or 'BACKWARD'
:rtype: str
[ "GET", "/", "config", "Get", "the", "current", "compatibility", "level", "for", "a", "subject", ".", "Result", "will", "be", "one", "of", ":" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/confluent_kafka/avro/cached_schema_registry_client.py#L407-L434
train
confluentinc/confluent-kafka-python
tools/download-s3.py
Artifact.download
def download(self, dirpath):
    """ Download artifact from S3 and store in dirpath directory.
        If the artifact is already downloaded nothing is done. """
    if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
        return
    print('Downloading %s -> %s' % (self.path, self.lpath))
    if dry_run:
        return
    self.arts.s3_bucket.download_file(self.path, self.lpath)
python
def download(self, dirpath):
    """ Download artifact from S3 and store in dirpath directory.
        If the artifact is already downloaded nothing is done. """
    if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
        return
    print('Downloading %s -> %s' % (self.path, self.lpath))
    if dry_run:
        return
    self.arts.s3_bucket.download_file(self.path, self.lpath)
[ "def", "download", "(", "self", ",", "dirpath", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "lpath", ")", "and", "os", ".", "path", ".", "getsize", "(", "self", ".", "lpath", ")", ">", "0", ":", "return", "print", "(", "...
Download artifact from S3 and store in dirpath directory. If the artifact is already downloaded nothing is done.
[ "Download", "artifact", "from", "S3", "and", "store", "in", "dirpath", "directory", ".", "If", "the", "artifact", "is", "already", "downloaded", "nothing", "is", "done", "." ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/tools/download-s3.py#L46-L54
train
confluentinc/confluent-kafka-python
tools/download-s3.py
Artifacts.collect_single_s3
def collect_single_s3(self, path):
    """ Collect single S3 artifact
        :param path string: S3 path """

    # The S3 folder contains the tokens needed to perform
    # matching of project, gitref, etc.
    folder = os.path.dirname(path)

    rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
    if rinfo is None or len(rinfo) == 0:
        # print('Incorrect folder/file name format for %s' % folder)
        return None

    info = dict(rinfo)

    # Ignore AppVeyor Debug builds
    if info.get('bldtype', '').lower() == 'debug':
        print('Ignoring debug artifact %s' % folder)
        return None

    tag = info.get('tag', None)
    if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
        # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
        # with an empty value when not set, it leaves that token
        # in the string - so translate that to no tag.
        tag = None

    sha = info.get('sha', None)

    # Match tag or sha to gitref
    if (tag is not None and tag == self.gitref) or (sha is not None and sha.startswith(self.gitref)):
        return Artifact(self, path, info)

    return None
python
def collect_single_s3(self, path):
    """ Collect single S3 artifact
        :param path string: S3 path """

    # The S3 folder contains the tokens needed to perform
    # matching of project, gitref, etc.
    folder = os.path.dirname(path)

    rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
    if rinfo is None or len(rinfo) == 0:
        # print('Incorrect folder/file name format for %s' % folder)
        return None

    info = dict(rinfo)

    # Ignore AppVeyor Debug builds
    if info.get('bldtype', '').lower() == 'debug':
        print('Ignoring debug artifact %s' % folder)
        return None

    tag = info.get('tag', None)
    if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
        # AppVeyor doesn't substitute $(APPVEYOR_REPO_TAG_NAME)
        # with an empty value when not set, it leaves that token
        # in the string - so translate that to no tag.
        tag = None

    sha = info.get('sha', None)

    # Match tag or sha to gitref
    if (tag is not None and tag == self.gitref) or (sha is not None and sha.startswith(self.gitref)):
        return Artifact(self, path, info)

    return None
[ "def", "collect_single_s3", "(", "self", ",", "path", ")", ":", "# The S3 folder contains the tokens needed to perform", "# matching of project, gitref, etc.", "folder", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "rinfo", "=", "re", ".", "findall", "(...
Collect single S3 artifact

:param path string: S3 path
[ "Collect", "single", "S3", "artifact", ":", "param", ":", "path", "string", ":", "S3", "path" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/tools/download-s3.py#L68-L102
train
confluentinc/confluent-kafka-python
tools/download-s3.py
Artifacts.collect_s3
def collect_s3(self):
    """ Collect and download build-artifacts from S3 based on git reference """
    print('Collecting artifacts matching tag/sha %s from S3 bucket %s' % (self.gitref, s3_bucket))
    self.s3 = boto3.resource('s3')
    self.s3_bucket = self.s3.Bucket(s3_bucket)
    self.s3.meta.client.head_bucket(Bucket=s3_bucket)

    for key in self.s3_bucket.objects.all():
        self.collect_single_s3(key.key)

    for a in self.artifacts:
        a.download(self.dlpath)
python
def collect_s3(self):
    """ Collect and download build-artifacts from S3 based on git reference """
    print('Collecting artifacts matching tag/sha %s from S3 bucket %s' % (self.gitref, s3_bucket))
    self.s3 = boto3.resource('s3')
    self.s3_bucket = self.s3.Bucket(s3_bucket)
    self.s3.meta.client.head_bucket(Bucket=s3_bucket)

    for key in self.s3_bucket.objects.all():
        self.collect_single_s3(key.key)

    for a in self.artifacts:
        a.download(self.dlpath)
[ "def", "collect_s3", "(", "self", ")", ":", "print", "(", "'Collecting artifacts matching tag/sha %s from S3 bucket %s'", "%", "(", "self", ".", "gitref", ",", "s3_bucket", ")", ")", "self", ".", "s3", "=", "boto3", ".", "resource", "(", "'s3'", ")", "self", ...
Collect and download build-artifacts from S3 based on git reference
[ "Collect", "and", "download", "build", "-", "artifacts", "from", "S3", "based", "on", "git", "reference" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/tools/download-s3.py#L104-L114
train
confluentinc/confluent-kafka-python
tools/download-s3.py
Artifacts.collect_local
def collect_local(self, path):
    """ Collect artifacts from a local directory possibly previously
        collected from s3 """
    for f in os.listdir(path):
        lpath = os.path.join(path, f)
        if not os.path.isfile(lpath):
            continue
        Artifact(self, lpath)
python
def collect_local(self, path):
    """ Collect artifacts from a local directory possibly previously
        collected from s3 """
    for f in os.listdir(path):
        lpath = os.path.join(path, f)
        if not os.path.isfile(lpath):
            continue
        Artifact(self, lpath)
[ "def", "collect_local", "(", "self", ",", "path", ")", ":", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", ":", "lpath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", "if", "not", "os", ".", "path", ".", "isfile", ...
Collect artifacts from a local directory possibly previously collected from s3
[ "Collect", "artifacts", "from", "a", "local", "directory", "possibly", "previously", "collected", "from", "s3" ]
5a8aeb741609e61eaccafff2a67fa494dd549e8b
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/tools/download-s3.py#L116-L123
train
saltstack/salt
salt/modules/ddns.py
_config
def _config(name, key=None, **kwargs):
    '''
    Return a value for 'name' from command line args then config file options.
    Specify 'key' if the config file option is not the same as 'name'.
    '''
    if key is None:
        key = name
    if name in kwargs:
        value = kwargs[name]
    else:
        value = __salt__['config.option']('ddns.{0}'.format(key))
        if not value:
            value = None
    return value
python
def _config(name, key=None, **kwargs):
    '''
    Return a value for 'name' from command line args then config file options.
    Specify 'key' if the config file option is not the same as 'name'.
    '''
    if key is None:
        key = name
    if name in kwargs:
        value = kwargs[name]
    else:
        value = __salt__['config.option']('ddns.{0}'.format(key))
        if not value:
            value = None
    return value
[ "def", "_config", "(", "name", ",", "key", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "key", "is", "None", ":", "key", "=", "name", "if", "name", "in", "kwargs", ":", "value", "=", "kwargs", "[", "name", "]", "else", ":", "value", "=...
Return a value for 'name' from command line args then config file options. Specify 'key' if the config file option is not the same as 'name'.
[ "Return", "a", "value", "for", "name", "from", "command", "line", "args", "then", "config", "file", "options", ".", "Specify", "key", "if", "the", "config", "file", "option", "is", "not", "the", "same", "as", "name", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ddns.py#L55-L68
train
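A stand-alone re-implementation of the precedence logic above, for illustration only; the real function reads options through the __salt__['config.option'] loader, which only exists inside a running minion:

def _pick(name, key=None, config_options=None, **kwargs):
    # Same precedence as _config: explicit kwarg first, then the ddns.<key>
    # config option, else None.
    key = key or name
    if name in kwargs:
        return kwargs[name]
    return (config_options or {}).get('ddns.{0}'.format(key)) or None

print(_pick('keyname', config_options={'ddns.keyname': 'tsig.example.'}))  # -> tsig.example.
print(_pick('keyname', keyname='override'))                                # -> override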
saltstack/salt
salt/modules/ddns.py
add_host
def add_host(zone, name, ttl, ip, nameserver='127.0.0.1', replace=True,
             timeout=5, port=53, **kwargs):
    '''
    Add, replace, or update the A and PTR (reverse) records for a host.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.add_host example.com host1 60 10.1.1.1
    '''
    res = update(zone, name, ttl, 'A', ip, nameserver, timeout, replace, port,
                 **kwargs)
    if res is False:
        return False

    fqdn = '{0}.{1}.'.format(name, zone)
    parts = ip.split('.')[::-1]
    popped = []

    # Iterate over possible reverse zones
    while len(parts) > 1:
        p = parts.pop(0)
        popped.append(p)
        zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
        name = '.'.join(popped)
        ptr = update(zone, name, ttl, 'PTR', fqdn, nameserver, timeout,
                     replace, port, **kwargs)
        if ptr:
            return True
    return res
python
def add_host(zone, name, ttl, ip, nameserver='127.0.0.1', replace=True,
             timeout=5, port=53, **kwargs):
    '''
    Add, replace, or update the A and PTR (reverse) records for a host.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.add_host example.com host1 60 10.1.1.1
    '''
    res = update(zone, name, ttl, 'A', ip, nameserver, timeout, replace, port,
                 **kwargs)
    if res is False:
        return False

    fqdn = '{0}.{1}.'.format(name, zone)
    parts = ip.split('.')[::-1]
    popped = []

    # Iterate over possible reverse zones
    while len(parts) > 1:
        p = parts.pop(0)
        popped.append(p)
        zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
        name = '.'.join(popped)
        ptr = update(zone, name, ttl, 'PTR', fqdn, nameserver, timeout,
                     replace, port, **kwargs)
        if ptr:
            return True
    return res
[ "def", "add_host", "(", "zone", ",", "name", ",", "ttl", ",", "ip", ",", "nameserver", "=", "'127.0.0.1'", ",", "replace", "=", "True", ",", "timeout", "=", "5", ",", "port", "=", "53", ",", "*", "*", "kwargs", ")", ":", "res", "=", "update", "("...
Add, replace, or update the A and PTR (reverse) records for a host.

CLI Example:

.. code-block:: bash

    salt ns1 ddns.add_host example.com host1 60 10.1.1.1
[ "Add", "replace", "or", "update", "the", "A", "and", "PTR", "(", "reverse", ")", "records", "for", "a", "host", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ddns.py#L79-L109
train
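A pure-Python trace of the first pass through the reverse-zone loop above, using the docstring's 10.1.1.1 example:

parts = '10.1.1.1'.split('.')[::-1]   # ['1', '1', '1', '10']
popped = [parts.pop(0)]
zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
name = '.'.join(popped)
print(zone, name)                     # 1.1.10.in-addr.arpa. 1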
saltstack/salt
salt/modules/ddns.py
delete_host
def delete_host(zone, name, nameserver='127.0.0.1', timeout=5, port=53,
                **kwargs):
    '''
    Delete the forward and reverse records for a host.

    Returns true if any records are deleted.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.delete_host example.com host1
    '''
    fqdn = '{0}.{1}'.format(name, zone)
    request = dns.message.make_query(fqdn, 'A')
    answer = dns.query.udp(request, nameserver, timeout, port)
    try:
        ips = [i.address for i in answer.answer[0].items]
    except IndexError:
        ips = []

    res = delete(zone, name, nameserver=nameserver, timeout=timeout,
                 port=port, **kwargs)

    fqdn = fqdn + '.'
    for ip in ips:
        parts = ip.split('.')[::-1]
        popped = []

        # Iterate over possible reverse zones
        while len(parts) > 1:
            p = parts.pop(0)
            popped.append(p)
            zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
            name = '.'.join(popped)
            ptr = delete(zone, name, 'PTR', fqdn, nameserver=nameserver,
                         timeout=timeout, port=port, **kwargs)
            if ptr:
                res = True
    return res
python
def delete_host(zone, name, nameserver='127.0.0.1', timeout=5, port=53,
                **kwargs):
    '''
    Delete the forward and reverse records for a host.

    Returns true if any records are deleted.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.delete_host example.com host1
    '''
    fqdn = '{0}.{1}'.format(name, zone)
    request = dns.message.make_query(fqdn, 'A')
    answer = dns.query.udp(request, nameserver, timeout, port)
    try:
        ips = [i.address for i in answer.answer[0].items]
    except IndexError:
        ips = []

    res = delete(zone, name, nameserver=nameserver, timeout=timeout,
                 port=port, **kwargs)

    fqdn = fqdn + '.'
    for ip in ips:
        parts = ip.split('.')[::-1]
        popped = []

        # Iterate over possible reverse zones
        while len(parts) > 1:
            p = parts.pop(0)
            popped.append(p)
            zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
            name = '.'.join(popped)
            ptr = delete(zone, name, 'PTR', fqdn, nameserver=nameserver,
                         timeout=timeout, port=port, **kwargs)
            if ptr:
                res = True
    return res
[ "def", "delete_host", "(", "zone", ",", "name", ",", "nameserver", "=", "'127.0.0.1'", ",", "timeout", "=", "5", ",", "port", "=", "53", ",", "*", "*", "kwargs", ")", ":", "fqdn", "=", "'{0}.{1}'", ".", "format", "(", "name", ",", "zone", ")", "req...
Delete the forward and reverse records for a host.

Returns true if any records are deleted.

CLI Example:

.. code-block:: bash

    salt ns1 ddns.delete_host example.com host1
[ "Delete", "the", "forward", "and", "reverse", "records", "for", "a", "host", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ddns.py#L112-L151
train
saltstack/salt
salt/modules/ddns.py
update
def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5,
           replace=False, port=53, **kwargs):
    '''
    Add, replace, or update a DNS record.
    nameserver must be an IP address and the minion running this module
    must have update privileges on that server.
    If replace is true, first deletes all records for this name and type.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.update example.com host1 60 A 10.0.0.1
    '''
    name = six.text_type(name)

    if name[-1:] == '.':
        fqdn = name
    else:
        fqdn = '{0}.{1}'.format(name, zone)

    request = dns.message.make_query(fqdn, rdtype)
    answer = dns.query.udp(request, nameserver, timeout, port)

    rdtype = dns.rdatatype.from_text(rdtype)
    rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)

    keyring = _get_keyring(_config('keyfile', **kwargs))
    keyname = _config('keyname', **kwargs)
    keyalgorithm = _config('keyalgorithm',
                           **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'

    is_exist = False
    for rrset in answer.answer:
        if rdata in rrset.items:
            if ttl == rrset.ttl:
                if len(answer.answer) >= 1 or len(rrset.items) >= 1:
                    is_exist = True
                    break

    dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname,
                                   keyalgorithm=keyalgorithm)
    if replace:
        dns_update.replace(name, ttl, rdata)
    elif not is_exist:
        dns_update.add(name, ttl, rdata)
    else:
        return None

    answer = dns.query.udp(dns_update, nameserver, timeout, port)
    if answer.rcode() > 0:
        return False
    return True
python
def update(zone, name, ttl, rdtype, data, nameserver='127.0.0.1', timeout=5,
           replace=False, port=53, **kwargs):
    '''
    Add, replace, or update a DNS record.
    nameserver must be an IP address and the minion running this module
    must have update privileges on that server.
    If replace is true, first deletes all records for this name and type.

    CLI Example:

    .. code-block:: bash

        salt ns1 ddns.update example.com host1 60 A 10.0.0.1
    '''
    name = six.text_type(name)

    if name[-1:] == '.':
        fqdn = name
    else:
        fqdn = '{0}.{1}'.format(name, zone)

    request = dns.message.make_query(fqdn, rdtype)
    answer = dns.query.udp(request, nameserver, timeout, port)

    rdtype = dns.rdatatype.from_text(rdtype)
    rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, data)

    keyring = _get_keyring(_config('keyfile', **kwargs))
    keyname = _config('keyname', **kwargs)
    keyalgorithm = _config('keyalgorithm',
                           **kwargs) or 'HMAC-MD5.SIG-ALG.REG.INT'

    is_exist = False
    for rrset in answer.answer:
        if rdata in rrset.items:
            if ttl == rrset.ttl:
                if len(answer.answer) >= 1 or len(rrset.items) >= 1:
                    is_exist = True
                    break

    dns_update = dns.update.Update(zone, keyring=keyring, keyname=keyname,
                                   keyalgorithm=keyalgorithm)
    if replace:
        dns_update.replace(name, ttl, rdata)
    elif not is_exist:
        dns_update.add(name, ttl, rdata)
    else:
        return None

    answer = dns.query.udp(dns_update, nameserver, timeout, port)
    if answer.rcode() > 0:
        return False
    return True
[ "def", "update", "(", "zone", ",", "name", ",", "ttl", ",", "rdtype", ",", "data", ",", "nameserver", "=", "'127.0.0.1'", ",", "timeout", "=", "5", ",", "replace", "=", "False", ",", "port", "=", "53", ",", "*", "*", "kwargs", ")", ":", "name", "...
Add, replace, or update a DNS record. nameserver must be an IP address and the minion running this module must have update privileges on that server. If replace is true, first deletes all records for this name and type. CLI Example: .. code-block:: bash salt ns1 ddns.update example.com host1 60 A 10.0.0.1
[ "Add", "replace", "or", "update", "a", "DNS", "record", ".", "nameserver", "must", "be", "an", "IP", "address", "and", "the", "minion", "running", "this", "module", "must", "have", "update", "privileges", "on", "that", "server", ".", "If", "replace", "is",...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ddns.py#L154-L205
train
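A sketch of the pre-update existence check performed above, using dnspython directly; the name, server, and TTL here are illustrative and assume a reachable DNS server.

import dns.message
import dns.query
import dns.rdata
import dns.rdataclass
import dns.rdatatype

request = dns.message.make_query('host1.example.com', 'A')
answer = dns.query.udp(request, '127.0.0.1', timeout=5, port=53)
rdtype = dns.rdatatype.from_text('A')
rdata = dns.rdata.from_text(dns.rdataclass.IN, rdtype, '10.0.0.1')
# the record counts as present only with a matching value and TTL
is_exist = any(rdata in rrset.items and rrset.ttl == 60
               for rrset in answer.answer)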
saltstack/salt
salt/modules/ansiblegate.py
_set_callables
def _set_callables(modules): ''' Set all Ansible modules callables :return: ''' def _set_function(cmd_name, doc): ''' Create a Salt function for the Ansible module. ''' def _cmd(*args, **kw): ''' Call an Ansible module as a function from the Salt. ''' kwargs = {} if kw.get('__pub_arg'): for _kw in kw.get('__pub_arg', []): if isinstance(_kw, dict): kwargs = _kw break return _caller.call(cmd_name, *args, **kwargs) _cmd.__doc__ = doc return _cmd for mod in modules: setattr(sys.modules[__name__], mod, _set_function(mod, 'Available'))
python
def _set_callables(modules): ''' Set all Ansible modules callables :return: ''' def _set_function(cmd_name, doc): ''' Create a Salt function for the Ansible module. ''' def _cmd(*args, **kw): ''' Call an Ansible module as a function from the Salt. ''' kwargs = {} if kw.get('__pub_arg'): for _kw in kw.get('__pub_arg', []): if isinstance(_kw, dict): kwargs = _kw break return _caller.call(cmd_name, *args, **kwargs) _cmd.__doc__ = doc return _cmd for mod in modules: setattr(sys.modules[__name__], mod, _set_function(mod, 'Available'))
[ "def", "_set_callables", "(", "modules", ")", ":", "def", "_set_function", "(", "cmd_name", ",", "doc", ")", ":", "'''\n Create a Salt function for the Ansible module.\n '''", "def", "_cmd", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "'''\n ...
Set all Ansible modules callables :return:
[ "Set", "all", "Ansible", "modules", "callables", ":", "return", ":" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L190-L215
train
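The closure-per-name pattern used by _set_callables, reduced to a self-contained sketch; the proxied call is faked and the module names are illustrative.

import sys

def _set_function(cmd_name, doc):
    def _cmd(*args, **kwargs):
        # a real implementation would delegate to the Ansible caller here
        return 'called {0} with {1} {2}'.format(cmd_name, args, kwargs)
    _cmd.__doc__ = doc
    return _cmd

for mod in ('ping', 'copy'):
    setattr(sys.modules[__name__], mod, _set_function(mod, 'Available'))

print(ping('example.com'))  # noqa: F821 -- the name was created dynamically above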
saltstack/salt
salt/modules/ansiblegate.py
help
def help(module=None, *args): ''' Display help on Ansible standard module. :param module: :return: ''' if not module: raise CommandExecutionError('Please tell me what module you want to have helped with. ' 'Or call "ansible.list" to know what is available.') try: module = _resolver.load_module(module) except (ImportError, LoaderError) as err: raise CommandExecutionError('Module "{0}" is currently not functional on your system.'.format(module)) doc = {} ret = {} for docset in module.DOCUMENTATION.split('---'): try: docset = salt.utils.yaml.safe_load(docset) if docset: doc.update(docset) except Exception as err: log.error("Error parsing doc section: %s", err) if not args: if 'description' in doc: description = doc.get('description') or '' del doc['description'] ret['Description'] = description ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys() else: for arg in args: info = doc.get(arg) if info is not None: ret[arg] = info return ret
python
def help(module=None, *args): ''' Display help on Ansible standard module. :param module: :return: ''' if not module: raise CommandExecutionError('Please tell me what module you want to have helped with. ' 'Or call "ansible.list" to know what is available.') try: module = _resolver.load_module(module) except (ImportError, LoaderError) as err: raise CommandExecutionError('Module "{0}" is currently not functional on your system.'.format(module)) doc = {} ret = {} for docset in module.DOCUMENTATION.split('---'): try: docset = salt.utils.yaml.safe_load(docset) if docset: doc.update(docset) except Exception as err: log.error("Error parsing doc section: %s", err) if not args: if 'description' in doc: description = doc.get('description') or '' del doc['description'] ret['Description'] = description ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys() else: for arg in args: info = doc.get(arg) if info is not None: ret[arg] = info return ret
[ "def", "help", "(", "module", "=", "None", ",", "*", "args", ")", ":", "if", "not", "module", ":", "raise", "CommandExecutionError", "(", "'Please tell me what module you want to have helped with. '", "'Or call \"ansible.list\" to know what is available.'", ")", "try", ":...
Display help on Ansible standard module. :param module: :return:
[ "Display", "help", "on", "Ansible", "standard", "module", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L237-L273
train
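A sketch of the '---'-split YAML parsing that help applies to a module's DOCUMENTATION string; PyYAML stands in for salt.utils.yaml and the string is a made-up example.

import yaml

DOCUMENTATION = '''
---
module: ping
description: Try to connect to host and return pong on success.
'''

doc = {}
for docset in DOCUMENTATION.split('---'):
    parsed = yaml.safe_load(docset)   # empty fragments parse to None
    if parsed:
        doc.update(parsed)
print(doc['description'])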
saltstack/salt
salt/modules/ansiblegate.py
playbooks
def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None, flush_cache=False, forks=5, inventory=None, limit=None, list_hosts=False, list_tags=False, list_tasks=False, module_path=None, skip_tags=None, start_at_task=None, syntax_check=False, tags=None, playbook_kwargs=None): ''' Run Ansible Playbooks :param playbook: Which playbook to run. :param rundir: Directory to run `ansible-playbook` in. (Default: None) :param check: don't make any changes; instead, try to predict some of the changes that may occur (Default: False) :param diff: when changing (small) files and templates, show the differences in those files; works great with --check (default: False) :param extra_vars: set additional variables as key=value or YAML/JSON, if filename prepend with @, (default: None) :param flush_cache: clear the fact cache for every host in inventory (default: False) :param forks: specify number of parallel processes to use (Default: 5) :param inventory: specify inventory host path or comma separated host list. (Default: None) (Ansible's default is /etc/ansible/hosts) :param limit: further limit selected hosts to an additional pattern (Default: None) :param list_hosts: outputs a list of matching hosts; does not execute anything else (Default: False) :param list_tags: list all available tags (Default: False) :param list_tasks: list all tasks that would be executed (Default: False) :param module_path: prepend colon-separated path(s) to module library. (Default: None) :param skip_tags: only run plays and tasks whose tags do not match these values (Default: False) :param start_at_task: start the playbook at the task matching this name (Default: None) :param syntax_check: perform a syntax check on the playbook, but do not execute it (Default: False) :param tags: only run plays and tasks tagged with these values (Default: None) :return: Playbook return CLI Example: .. code-block:: bash salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml ''' command = ['ansible-playbook', playbook] if check: command.append('--check') if diff: command.append('--diff') if isinstance(extra_vars, dict): command.append("--extra-vars='{0}'".format(json.dumps(extra_vars))) elif isinstance(extra_vars, six.text_type) and extra_vars.startswith('@'): command.append('--extra-vars={0}'.format(extra_vars)) if flush_cache: command.append('--flush-cache') if inventory: command.append('--inventory={0}'.format(inventory)) if limit: command.append('--limit={0}'.format(limit)) if list_hosts: command.append('--list-hosts') if list_tags: command.append('--list-tags') if list_tasks: command.append('--list-tasks') if module_path: command.append('--module-path={0}'.format(module_path)) if skip_tags: command.append('--skip-tags={0}'.format(skip_tags)) if start_at_task: command.append('--start-at-task={0}'.format(start_at_task)) if syntax_check: command.append('--syntax-check') if tags: command.append('--tags={0}'.format(tags)) if playbook_kwargs: for key, value in six.iteritems(playbook_kwargs): key = key.replace('_', '-') if value is True: command.append('--{0}'.format(key)) elif isinstance(value, six.text_type): command.append('--{0}={1}'.format(key, value)) elif isinstance(value, dict): command.append('--{0}={1}'.format(key, json.dumps(value))) command.append('--forks={0}'.format(forks)) cmd_kwargs = { 'env': {'ANSIBLE_STDOUT_CALLBACK': 'json', 'ANSIBLE_RETRY_FILES_ENABLED': '0'}, 'cwd': rundir, 'cmd': ' '.join(command) } ret = __salt__['cmd.run_all'](**cmd_kwargs) log.debug('Ansible Playbook Return: %s', ret) retdata = json.loads(ret['stdout']) if ret['retcode']: __context__['retcode'] = ret['retcode'] return retdata
python
def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None, flush_cache=False, forks=5, inventory=None, limit=None, list_hosts=False, list_tags=False, list_tasks=False, module_path=None, skip_tags=None, start_at_task=None, syntax_check=False, tags=None, playbook_kwargs=None): ''' Run Ansible Playbooks :param playbook: Which playbook to run. :param rundir: Directory to run `ansible-playbook` in. (Default: None) :param check: don't make any changes; instead, try to predict some of the changes that may occur (Default: False) :param diff: when changing (small) files and templates, show the differences in those files; works great with --check (default: False) :param extra_vars: set additional variables as key=value or YAML/JSON, if filename prepend with @, (default: None) :param flush_cache: clear the fact cache for every host in inventory (default: False) :param forks: specify number of parallel processes to use (Default: 5) :param inventory: specify inventory host path or comma separated host list. (Default: None) (Ansible's default is /etc/ansible/hosts) :param limit: further limit selected hosts to an additional pattern (Default: None) :param list_hosts: outputs a list of matching hosts; does not execute anything else (Default: False) :param list_tags: list all available tags (Default: False) :param list_tasks: list all tasks that would be executed (Default: False) :param module_path: prepend colon-separated path(s) to module library. (Default: None) :param skip_tags: only run plays and tasks whose tags do not match these values (Default: False) :param start_at_task: start the playbook at the task matching this name (Default: None) :param syntax_check: perform a syntax check on the playbook, but do not execute it (Default: False) :param tags: only run plays and tasks tagged with these values (Default: None) :return: Playbook return CLI Example: .. code-block:: bash salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml ''' command = ['ansible-playbook', playbook] if check: command.append('--check') if diff: command.append('--diff') if isinstance(extra_vars, dict): command.append("--extra-vars='{0}'".format(json.dumps(extra_vars))) elif isinstance(extra_vars, six.text_type) and extra_vars.startswith('@'): command.append('--extra-vars={0}'.format(extra_vars)) if flush_cache: command.append('--flush-cache') if inventory: command.append('--inventory={0}'.format(inventory)) if limit: command.append('--limit={0}'.format(limit)) if list_hosts: command.append('--list-hosts') if list_tags: command.append('--list-tags') if list_tasks: command.append('--list-tasks') if module_path: command.append('--module-path={0}'.format(module_path)) if skip_tags: command.append('--skip-tags={0}'.format(skip_tags)) if start_at_task: command.append('--start-at-task={0}'.format(start_at_task)) if syntax_check: command.append('--syntax-check') if tags: command.append('--tags={0}'.format(tags)) if playbook_kwargs: for key, value in six.iteritems(playbook_kwargs): key = key.replace('_', '-') if value is True: command.append('--{0}'.format(key)) elif isinstance(value, six.text_type): command.append('--{0}={1}'.format(key, value)) elif isinstance(value, dict): command.append('--{0}={1}'.format(key, json.dumps(value))) command.append('--forks={0}'.format(forks)) cmd_kwargs = { 'env': {'ANSIBLE_STDOUT_CALLBACK': 'json', 'ANSIBLE_RETRY_FILES_ENABLED': '0'}, 'cwd': rundir, 'cmd': ' '.join(command) } ret = __salt__['cmd.run_all'](**cmd_kwargs) log.debug('Ansible Playbook Return: %s', ret) retdata = json.loads(ret['stdout']) if ret['retcode']: __context__['retcode'] = ret['retcode'] return retdata
[ "def", "playbooks", "(", "playbook", ",", "rundir", "=", "None", ",", "check", "=", "False", ",", "diff", "=", "False", ",", "extra_vars", "=", "None", ",", "flush_cache", "=", "False", ",", "forks", "=", "5", ",", "inventory", "=", "None", ",", "lim...
Run Ansible Playbooks :param playbook: Which playbook to run. :param rundir: Directory to run `ansible-playbook` in. (Default: None) :param check: don't make any changes; instead, try to predict some of the changes that may occur (Default: False) :param diff: when changing (small) files and templates, show the differences in those files; works great with --check (default: False) :param extra_vars: set additional variables as key=value or YAML/JSON, if filename prepend with @, (default: None) :param flush_cache: clear the fact cache for every host in inventory (default: False) :param forks: specify number of parallel processes to use (Default: 5) :param inventory: specify inventory host path or comma separated host list. (Default: None) (Ansible's default is /etc/ansible/hosts) :param limit: further limit selected hosts to an additional pattern (Default: None) :param list_hosts: outputs a list of matching hosts; does not execute anything else (Default: False) :param list_tags: list all available tags (Default: False) :param list_tasks: list all tasks that would be executed (Default: False) :param module_path: prepend colon-separated path(s) to module library. (Default: None) :param skip_tags: only run plays and tasks whose tags do not match these values (Default: False) :param start_at_task: start the playbook at the task matching this name (Default: None) :param syntax_check: perform a syntax check on the playbook, but do not execute it (Default: False) :param tags: only run plays and tasks tagged with these values (Default: None) :return: Playbook return CLI Example: .. code-block:: bash salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml
[ "Run", "Ansible", "Playbooks" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L286-L381
train
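How the extra_vars branch above turns a dict into a CLI flag, as a quick standalone sketch mirroring the json.dumps call in the function:

import json

command = ['ansible-playbook', 'play.yml']
extra_vars = {'app_version': '1.2.3', 'debug': True}
if isinstance(extra_vars, dict):
    command.append("--extra-vars='{0}'".format(json.dumps(extra_vars)))
elif isinstance(extra_vars, str) and extra_vars.startswith('@'):
    command.append('--extra-vars={0}'.format(extra_vars))   # the @file form
print(' '.join(command))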
saltstack/salt
salt/modules/ansiblegate.py
AnsibleModuleResolver._get_modules_map
def _get_modules_map(self, path=None): ''' Get installed Ansible modules :return: ''' paths = {} root = ansible.modules.__path__[0] if not path: path = root for p_el in os.listdir(path): p_el_path = os.path.join(path, p_el) if os.path.islink(p_el_path): continue if os.path.isdir(p_el_path): paths.update(self._get_modules_map(p_el_path)) else: if (any(p_el.startswith(elm) for elm in ['__', '.']) or not p_el.endswith('.py') or p_el in ansible.constants.IGNORE_FILES): continue p_el_path = p_el_path.replace(root, '').split('.')[0] als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.') paths[als_name] = p_el_path return paths
python
def _get_modules_map(self, path=None): ''' Get installed Ansible modules :return: ''' paths = {} root = ansible.modules.__path__[0] if not path: path = root for p_el in os.listdir(path): p_el_path = os.path.join(path, p_el) if os.path.islink(p_el_path): continue if os.path.isdir(p_el_path): paths.update(self._get_modules_map(p_el_path)) else: if (any(p_el.startswith(elm) for elm in ['__', '.']) or not p_el.endswith('.py') or p_el in ansible.constants.IGNORE_FILES): continue p_el_path = p_el_path.replace(root, '').split('.')[0] als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.') paths[als_name] = p_el_path return paths
[ "def", "_get_modules_map", "(", "self", ",", "path", "=", "None", ")", ":", "paths", "=", "{", "}", "root", "=", "ansible", ".", "modules", ".", "__path__", "[", "0", "]", "if", "not", "path", ":", "path", "=", "root", "for", "p_el", "in", "os", ...
Get installed Ansible modules :return:
[ "Get", "installed", "Ansible", "modules", ":", "return", ":" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L68-L92
train
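The alias derivation in isolation: a path relative to the modules root becomes the dotted map key. A sketch with a hard-coded POSIX path; the root is illustrative.

root = '/site-packages/ansible/modules'                  # illustrative root
p_el_path = root + '/system/ping.py'
rel = p_el_path.replace(root, '').split('.')[0]          # '/system/ping'
als_name = rel.replace('.', '').replace('/', '', 1).replace('/', '.')
print(als_name)                                          # system.ping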
saltstack/salt
salt/modules/ansiblegate.py
AnsibleModuleResolver.load_module
def load_module(self, module): ''' Introspect Ansible module. :param module: :return: ''' m_ref = self._modules_map.get(module) if m_ref is None: raise LoaderError('Module "{0}" was not found'.format(module)) mod = importlib.import_module('ansible.modules{0}'.format( '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)]))) return mod
python
def load_module(self, module): ''' Introspect Ansible module. :param module: :return: ''' m_ref = self._modules_map.get(module) if m_ref is None: raise LoaderError('Module "{0}" was not found'.format(module)) mod = importlib.import_module('ansible.modules{0}'.format( '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)]))) return mod
[ "def", "load_module", "(", "self", ",", "module", ")", ":", "m_ref", "=", "self", ".", "_modules_map", ".", "get", "(", "module", ")", "if", "m_ref", "is", "None", ":", "raise", "LoaderError", "(", "'Module \"{0}\" was not found'", ".", "format", "(", "mod...
Introspect Ansible module. :param module: :return:
[ "Introspect", "Ansible", "module", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L94-L107
train
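The dotted import path built from a map entry, sketched with a hypothetical reference ('/system/ping.py', POSIX separators); importlib.import_module would then load it.

import os

m_ref = '/system/ping.py'
dotted = '.'.join(elm.split('.')[0] for elm in m_ref.split(os.path.sep))
print('ansible.modules{0}'.format(dotted))   # ansible.modules.system.ping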
saltstack/salt
salt/modules/ansiblegate.py
AnsibleModuleResolver.get_modules_list
def get_modules_list(self, pattern=None): ''' Return module map references. :return: ''' if pattern and '*' not in pattern: pattern = '*{0}*'.format(pattern) modules = [] for m_name, m_path in self._modules_map.items(): m_path = m_path.split('.')[0] m_name = '.'.join([elm for elm in m_path.split(os.path.sep) if elm]) if pattern and fnmatch.fnmatch(m_name, pattern) or not pattern: modules.append(m_name) return sorted(modules)
python
def get_modules_list(self, pattern=None): ''' Return module map references. :return: ''' if pattern and '*' not in pattern: pattern = '*{0}*'.format(pattern) modules = [] for m_name, m_path in self._modules_map.items(): m_path = m_path.split('.')[0] m_name = '.'.join([elm for elm in m_path.split(os.path.sep) if elm]) if pattern and fnmatch.fnmatch(m_name, pattern) or not pattern: modules.append(m_name) return sorted(modules)
[ "def", "get_modules_list", "(", "self", ",", "pattern", "=", "None", ")", ":", "if", "pattern", "and", "'*'", "not", "in", "pattern", ":", "pattern", "=", "'*{0}*'", ".", "format", "(", "pattern", ")", "modules", "=", "[", "]", "for", "m_name", ",", ...
Return module map references. :return:
[ "Return", "module", "map", "references", ".", ":", "return", ":" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L109-L122
train
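Pattern handling in isolation: a bare substring is wrapped in wildcards before fnmatch runs. A sketch with made-up module names:

import fnmatch

pattern = 'user'
if pattern and '*' not in pattern:
    pattern = '*{0}*'.format(pattern)
names = ['system.user', 'system.ping', 'web.htpasswd']
print(sorted(n for n in names if fnmatch.fnmatch(n, pattern)))   # ['system.user']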
saltstack/salt
salt/modules/ansiblegate.py
AnsibleModuleCaller.call
def call(self, module, *args, **kwargs): ''' Call an Ansible module by invoking it. :param module: the name of the module. :param args: Arguments to the module :param kwargs: keywords to the module :return: ''' module = self._resolver.load_module(module) if not hasattr(module, 'main'): raise CommandExecutionError('This module is not callable ' '(see "ansible.help {0}")'.format(module.__name__.replace('ansible.modules.', ''))) if args: kwargs['_raw_params'] = ' '.join(args) js_args = str('{{"ANSIBLE_MODULE_ARGS": {args}}}') # future lint: disable=blacklisted-function js_args = js_args.format(args=salt.utils.json.dumps(kwargs)) proc_out = salt.utils.timed_subprocess.TimedProc( ["echo", "{0}".format(js_args)], stdout=subprocess.PIPE, timeout=self.timeout) proc_out.run() proc_exc = salt.utils.timed_subprocess.TimedProc( ['python', module.__file__], stdin=proc_out.stdout, stdout=subprocess.PIPE, timeout=self.timeout) proc_exc.run() try: out = salt.utils.json.loads(proc_exc.stdout) except ValueError as ex: out = {'Error': (proc_exc.stderr and (proc_exc.stderr + '.') or six.text_type(ex))} if proc_exc.stdout: out['Given JSON output'] = proc_exc.stdout return out if 'invocation' in out: del out['invocation'] out['timeout'] = self.timeout return out
python
def call(self, module, *args, **kwargs): ''' Call an Ansible module by invoking it. :param module: the name of the module. :param args: Arguments to the module :param kwargs: keywords to the module :return: ''' module = self._resolver.load_module(module) if not hasattr(module, 'main'): raise CommandExecutionError('This module is not callable ' '(see "ansible.help {0}")'.format(module.__name__.replace('ansible.modules.', ''))) if args: kwargs['_raw_params'] = ' '.join(args) js_args = str('{{"ANSIBLE_MODULE_ARGS": {args}}}') # future lint: disable=blacklisted-function js_args = js_args.format(args=salt.utils.json.dumps(kwargs)) proc_out = salt.utils.timed_subprocess.TimedProc( ["echo", "{0}".format(js_args)], stdout=subprocess.PIPE, timeout=self.timeout) proc_out.run() proc_exc = salt.utils.timed_subprocess.TimedProc( ['python', module.__file__], stdin=proc_out.stdout, stdout=subprocess.PIPE, timeout=self.timeout) proc_exc.run() try: out = salt.utils.json.loads(proc_exc.stdout) except ValueError as ex: out = {'Error': (proc_exc.stderr and (proc_exc.stderr + '.') or six.text_type(ex))} if proc_exc.stdout: out['Given JSON output'] = proc_exc.stdout return out if 'invocation' in out: del out['invocation'] out['timeout'] = self.timeout return out
[ "def", "call", "(", "self", ",", "module", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "module", "=", "self", ".", "_resolver", ".", "load_module", "(", "module", ")", "if", "not", "hasattr", "(", "module", ",", "'main'", ")", ":", "raise"...
Call an Ansible module by invoking it. :param module: the name of the module. :param args: Arguments to the module :param kwargs: keywords to the module :return:
[ "Call", "an", "Ansible", "module", "by", "invoking", "it", ".", ":", "param", "module", ":", "the", "name", "of", "the", "module", ".", ":", "param", "args", ":", "Arguments", "to", "the", "module", ":", "param", "kwargs", ":", "keywords", "to", "the",...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ansiblegate.py#L142-L183
train
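A sketch of the JSON-over-stdin protocol the caller uses to drive a module file; plain subprocess stands in for salt.utils.timed_subprocess, and the module path is hypothetical (actually running this requires Ansible on disk).

import json
import subprocess

js_args = json.dumps({'ANSIBLE_MODULE_ARGS': {'data': 'hello'}})
proc = subprocess.run(
    ['python', '/path/to/ansible/modules/system/ping.py'],   # hypothetical path
    input=js_args.encode(),
    stdout=subprocess.PIPE,
    timeout=1200,
)
out = json.loads(proc.stdout)
out.pop('invocation', None)   # the caller strips this key as well
print(out)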
saltstack/salt
salt/modules/file.py
__clean_tmp
def __clean_tmp(sfn): ''' Clean out a template temp file ''' if sfn.startswith(os.path.join(tempfile.gettempdir(), salt.utils.files.TEMPFILE_PREFIX)): # Don't remove if it exists in file_roots (any saltenv) all_roots = itertools.chain.from_iterable( six.itervalues(__opts__['file_roots'])) in_roots = any(sfn.startswith(root) for root in all_roots) # Only clean up files that exist if os.path.exists(sfn) and not in_roots: os.remove(sfn)
python
def __clean_tmp(sfn): ''' Clean out a template temp file ''' if sfn.startswith(os.path.join(tempfile.gettempdir(), salt.utils.files.TEMPFILE_PREFIX)): # Don't remove if it exists in file_roots (any saltenv) all_roots = itertools.chain.from_iterable( six.itervalues(__opts__['file_roots'])) in_roots = any(sfn.startswith(root) for root in all_roots) # Only clean up files that exist if os.path.exists(sfn) and not in_roots: os.remove(sfn)
[ "def", "__clean_tmp", "(", "sfn", ")", ":", "if", "sfn", ".", "startswith", "(", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "salt", ".", "utils", ".", "files", ".", "TEMPFILE_PREFIX", ")", ")", ":", "# Don't re...
Clean out a template temp file
[ "Clean", "out", "a", "template", "temp", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L88-L100
train
saltstack/salt
salt/modules/file.py
_binary_replace
def _binary_replace(old, new): ''' This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ. ''' old_isbin = not __utils__['files.is_text'](old) new_isbin = not __utils__['files.is_text'](new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return 'Replace binary file' elif old_isbin: return 'Replace binary file with text file' elif new_isbin: return 'Replace text file with binary file' return ''
python
def _binary_replace(old, new): ''' This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ. ''' old_isbin = not __utils__['files.is_text'](old) new_isbin = not __utils__['files.is_text'](new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return 'Replace binary file' elif old_isbin: return 'Replace binary file with text file' elif new_isbin: return 'Replace text file with binary file' return ''
[ "def", "_binary_replace", "(", "old", ",", "new", ")", ":", "old_isbin", "=", "not", "__utils__", "[", "'files.is_text'", "]", "(", "old", ")", "new_isbin", "=", "not", "__utils__", "[", "'files.is_text'", "]", "(", "new", ")", "if", "any", "(", "(", "...
This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ.
[ "This", "function", "does", "NOT", "do", "any", "diffing", "it", "just", "checks", "the", "old", "and", "new", "files", "to", "see", "if", "either", "is", "binary", "and", "provides", "an", "appropriate", "string", "noting", "the", "difference", "between", ...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L112-L131
train
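The decision table in isolation, with booleans standing in for the is_text checks:

def describe(old_isbin, new_isbin):
    if any((old_isbin, new_isbin)):
        if all((old_isbin, new_isbin)):
            return 'Replace binary file'
        elif old_isbin:
            return 'Replace binary file with text file'
        return 'Replace text file with binary file'
    return ''

print(describe(True, False))   # Replace binary file with text file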
saltstack/salt
salt/modules/file.py
_splitlines_preserving_trailing_newline
def _splitlines_preserving_trailing_newline(str): ''' Returns a list of the lines in the string, breaking at line boundaries and preserving a trailing newline (if present). Essentially, this works like ``str.splitlines(False)`` but preserves an empty line at the end. This is equivalent to the following code: .. code-block:: python lines = str.splitlines() if str.endswith('\n') or str.endswith('\r'): lines.append('') ''' lines = str.splitlines() if str.endswith('\n') or str.endswith('\r'): lines.append('') return lines
python
def _splitlines_preserving_trailing_newline(str): ''' Returns a list of the lines in the string, breaking at line boundaries and preserving a trailing newline (if present). Essentially, this works like ``str.splitlines(False)`` but preserves an empty line at the end. This is equivalent to the following code: .. code-block:: python lines = str.splitlines() if str.endswith('\n') or str.endswith('\r'): lines.append('') ''' lines = str.splitlines() if str.endswith('\n') or str.endswith('\r'): lines.append('') return lines
[ "def", "_splitlines_preserving_trailing_newline", "(", "str", ")", ":", "lines", "=", "str", ".", "splitlines", "(", ")", "if", "str", ".", "endswith", "(", "'\\n'", ")", "or", "str", ".", "endswith", "(", "'\\r'", ")", ":", "lines", ".", "append", "(", ...
Returns a list of the lines in the string, breaking at line boundaries and preserving a trailing newline (if present). Essentially, this works like ``str.splitlines(False)`` but preserves an empty line at the end. This is equivalent to the following code: .. code-block:: python lines = str.splitlines() if str.endswith('\n') or str.endswith('\r'): lines.append('')
[ "Returns", "a", "list", "of", "the", "lines", "in", "the", "string", "breaking", "at", "line", "boundaries", "and", "preserving", "a", "trailing", "newline", "(", "if", "present", ")", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L142-L159
train
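The behaviour being preserved, shown directly:

s = 'a\nb\n'
print(s.splitlines())    # ['a', 'b'] -- the trailing newline is lost
lines = s.splitlines()
if s.endswith('\n') or s.endswith('\r'):
    lines.append('')
print(lines)             # ['a', 'b', ''] -- the trailing newline survives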
saltstack/salt
salt/modules/file.py
gid_to_group
def gid_to_group(gid): ''' Convert the group id to the group name on this system gid gid to convert to a group name CLI Example: .. code-block:: bash salt '*' file.gid_to_group 0 ''' try: gid = int(gid) except ValueError: # This is not an integer, maybe it's already the group name? gid = group_to_gid(gid) if gid == '': # Don't even bother to feed it to grp return '' try: return grp.getgrgid(gid).gr_name except (KeyError, NameError): # If group is not present, fall back to the gid. return gid
python
def gid_to_group(gid): ''' Convert the group id to the group name on this system gid gid to convert to a group name CLI Example: .. code-block:: bash salt '*' file.gid_to_group 0 ''' try: gid = int(gid) except ValueError: # This is not an integer, maybe it's already the group name? gid = group_to_gid(gid) if gid == '': # Don't even bother to feed it to grp return '' try: return grp.getgrgid(gid).gr_name except (KeyError, NameError): # If group is not present, fall back to the gid. return gid
[ "def", "gid_to_group", "(", "gid", ")", ":", "try", ":", "gid", "=", "int", "(", "gid", ")", "except", "ValueError", ":", "# This is not an integer, maybe it's already the group name?", "gid", "=", "group_to_gid", "(", "gid", ")", "if", "gid", "==", "''", ":",...
Convert the group id to the group name on this system gid gid to convert to a group name CLI Example: .. code-block:: bash salt '*' file.gid_to_group 0
[ "Convert", "the", "group", "id", "to", "the", "group", "name", "on", "this", "system" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L162-L189
train
saltstack/salt
salt/modules/file.py
group_to_gid
def group_to_gid(group): ''' Convert the group to the gid on this system group group to convert to its gid CLI Example: .. code-block:: bash salt '*' file.group_to_gid root ''' if group is None: return '' try: if isinstance(group, int): return group return grp.getgrnam(group).gr_gid except KeyError: return ''
python
def group_to_gid(group): ''' Convert the group to the gid on this system group group to convert to its gid CLI Example: .. code-block:: bash salt '*' file.group_to_gid root ''' if group is None: return '' try: if isinstance(group, int): return group return grp.getgrnam(group).gr_gid except KeyError: return ''
[ "def", "group_to_gid", "(", "group", ")", ":", "if", "group", "is", "None", ":", "return", "''", "try", ":", "if", "isinstance", "(", "group", ",", "int", ")", ":", "return", "group", "return", "grp", ".", "getgrnam", "(", "group", ")", ".", "gr_gid"...
Convert the group to the gid on this system group group to convert to its gid CLI Example: .. code-block:: bash salt '*' file.group_to_gid root
[ "Convert", "the", "group", "to", "the", "gid", "on", "this", "system" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L192-L212
train
saltstack/salt
salt/modules/file.py
get_gid
def get_gid(path, follow_symlinks=True): ''' Return the id of the group that owns a given file path file or directory of which to get the gid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_gid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)
python
def get_gid(path, follow_symlinks=True): ''' Return the id of the group that owns a given file path file or directory of which to get the gid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_gid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)
[ "def", "get_gid", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'gid'", ",", "-", "1", ...
Return the id of the group that owns a given file path file or directory of which to get the gid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_gid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added
[ "Return", "the", "id", "of", "the", "group", "that", "owns", "a", "given", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L215-L235
train
saltstack/salt
salt/modules/file.py
get_group
def get_group(path, follow_symlinks=True): ''' Return the group that owns a given file path file or directory of which to get the group follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_group /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('group', False)
python
def get_group(path, follow_symlinks=True): ''' Return the group that owns a given file path file or directory of which to get the group follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_group /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('group', False)
[ "def", "get_group", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'group'", ",", "False",...
Return the group that owns a given file path file or directory of which to get the group follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_group /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added
[ "Return", "the", "group", "that", "owns", "a", "given", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L238-L257
train
saltstack/salt
salt/modules/file.py
user_to_uid
def user_to_uid(user): ''' Convert user name to a uid user user name to convert to its uid CLI Example: .. code-block:: bash salt '*' file.user_to_uid root ''' if user is None: user = salt.utils.user.get_user() try: if isinstance(user, int): return user return pwd.getpwnam(user).pw_uid except KeyError: return ''
python
def user_to_uid(user): ''' Convert user name to a uid user user name to convert to its uid CLI Example: .. code-block:: bash salt '*' file.user_to_uid root ''' if user is None: user = salt.utils.user.get_user() try: if isinstance(user, int): return user return pwd.getpwnam(user).pw_uid except KeyError: return ''
[ "def", "user_to_uid", "(", "user", ")", ":", "if", "user", "is", "None", ":", "user", "=", "salt", ".", "utils", ".", "user", ".", "get_user", "(", ")", "try", ":", "if", "isinstance", "(", "user", ",", "int", ")", ":", "return", "user", "return", ...
Convert user name to a uid user user name to convert to its uid CLI Example: .. code-block:: bash salt '*' file.user_to_uid root
[ "Convert", "user", "name", "to", "a", "uid" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L280-L300
train
saltstack/salt
salt/modules/file.py
get_uid
def get_uid(path, follow_symlinks=True): ''' Return the id of the user that owns a given file path file or directory of which to get the uid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_uid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('uid', -1)
python
def get_uid(path, follow_symlinks=True): ''' Return the id of the user that owns a given file path file or directory of which to get the uid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_uid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('uid', -1)
[ "def", "get_uid", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'uid'", ",", "-", "1", ...
Return the id of the user that owns a given file path file or directory of which to get the uid follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_uid /etc/passwd .. versionchanged:: 0.16.4 ``follow_symlinks`` option added
[ "Return", "the", "id", "of", "the", "user", "that", "owns", "a", "given", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L303-L322
train
saltstack/salt
salt/modules/file.py
get_mode
def get_mode(path, follow_symlinks=True): ''' Return the mode of a file path file or directory of which to get the mode follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')
python
def get_mode(path, follow_symlinks=True): ''' Return the mode of a file path file or directory of which to get the mode follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added ''' return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')
[ "def", "get_mode", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "return", "stats", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "follow_symlinks", "=", "follow_symlinks", ")", ".", "get", "(", "'mode'", ",", "''", ")...
Return the mode of a file path file or directory of which to get the mode follow_symlinks indicates whether symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added
[ "Return", "the", "mode", "of", "a", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L347-L366
train
saltstack/salt
salt/modules/file.py
set_mode
def set_mode(path, mode): ''' Set the mode of a file path file or directory of which to set the mode mode mode to set the path to CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644 ''' path = os.path.expanduser(path) mode = six.text_type(mode).lstrip('0Oo') if not mode: mode = '0' if not os.path.exists(path): raise CommandExecutionError('{0}: File not found'.format(path)) try: os.chmod(path, int(mode, 8)) except Exception: return 'Invalid Mode ' + mode return get_mode(path)
python
def set_mode(path, mode): ''' Set the mode of a file path file or directory of which to set the mode mode mode to set the path to CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644 ''' path = os.path.expanduser(path) mode = six.text_type(mode).lstrip('0Oo') if not mode: mode = '0' if not os.path.exists(path): raise CommandExecutionError('{0}: File not found'.format(path)) try: os.chmod(path, int(mode, 8)) except Exception: return 'Invalid Mode ' + mode return get_mode(path)
[ "def", "set_mode", "(", "path", ",", "mode", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "mode", "=", "six", ".", "text_type", "(", "mode", ")", ".", "lstrip", "(", "'0Oo'", ")", "if", "not", "mode", ":", "mode"...
Set the mode of a file path file or directory of which to set the mode mode mode to set the path to CLI Example: .. code-block:: bash salt '*' file.set_mode /etc/passwd 0644
[ "Set", "the", "mode", "of", "a", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L369-L396
train
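The mode normalisation in isolation: leading 0/O/o characters are stripped and the remainder parsed as octal. A sketch with str standing in for six.text_type:

mode = '0o644'
mode = str(mode).lstrip('0Oo') or '0'   # -> '644'
print(int(mode, 8))                     # 420, i.e. 0o644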
saltstack/salt
salt/modules/file.py
lchown
def lchown(path, user, group): ''' Chown a file, pass the file the desired user and group without following symlinks. path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.lchown /etc/passwd root root ''' path = os.path.expanduser(path) uid = user_to_uid(user) gid = group_to_gid(group) err = '' if uid == '': if user: err += 'User does not exist\n' else: uid = -1 if gid == '': if group: err += 'Group does not exist\n' else: gid = -1 if err: return err return os.lchown(path, uid, gid)
python
def lchown(path, user, group): ''' Chown a file, pass the file the desired user and group without following symlinks. path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.lchown /etc/passwd root root ''' path = os.path.expanduser(path) uid = user_to_uid(user) gid = group_to_gid(group) err = '' if uid == '': if user: err += 'User does not exist\n' else: uid = -1 if gid == '': if group: err += 'Group does not exist\n' else: gid = -1 if err: return err return os.lchown(path, uid, gid)
[ "def", "lchown", "(", "path", ",", "user", ",", "group", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "uid", "=", "user_to_uid", "(", "user", ")", "gid", "=", "group_to_gid", "(", "group", ")", "err", "=", "''", ...
Chown a file, pass the file the desired user and group without following symlinks. path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.lchown /etc/passwd root root
[ "Chown", "a", "file", "pass", "the", "file", "the", "desired", "user", "and", "group", "without", "following", "symlinks", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L399-L435
train
saltstack/salt
salt/modules/file.py
chown
def chown(path, user, group): ''' Chown a file, pass the file the desired user and group path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.chown /etc/passwd root root ''' path = os.path.expanduser(path) uid = user_to_uid(user) gid = group_to_gid(group) err = '' if uid == '': if user: err += 'User does not exist\n' else: uid = -1 if gid == '': if group: err += 'Group does not exist\n' else: gid = -1 if not os.path.exists(path): try: # Broken symlinks will return false, but still need to be chowned return os.lchown(path, uid, gid) except OSError: pass err += 'File not found' if err: return err return os.chown(path, uid, gid)
python
def chown(path, user, group): ''' Chown a file, pass the file the desired user and group path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.chown /etc/passwd root root ''' path = os.path.expanduser(path) uid = user_to_uid(user) gid = group_to_gid(group) err = '' if uid == '': if user: err += 'User does not exist\n' else: uid = -1 if gid == '': if group: err += 'Group does not exist\n' else: gid = -1 if not os.path.exists(path): try: # Broken symlinks will return false, but still need to be chowned return os.lchown(path, uid, gid) except OSError: pass err += 'File not found' if err: return err return os.chown(path, uid, gid)
[ "def", "chown", "(", "path", ",", "user", ",", "group", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "uid", "=", "user_to_uid", "(", "user", ")", "gid", "=", "group_to_gid", "(", "group", ")", "err", "=", "''", "...
Chown a file, pass the file the desired user and group path path to the file or directory user user owner group group owner CLI Example: .. code-block:: bash salt '*' file.chown /etc/passwd root root
[ "Chown", "a", "file", "pass", "the", "file", "the", "desired", "user", "and", "group" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L438-L481
train
saltstack/salt
salt/modules/file.py
chgrp
def chgrp(path, group): ''' Change the group of a file path path to the file or directory group group owner CLI Example: .. code-block:: bash salt '*' file.chgrp /etc/passwd root ''' path = os.path.expanduser(path) user = get_user(path) return chown(path, user, group)
python
def chgrp(path, group): ''' Change the group of a file path path to the file or directory group group owner CLI Example: .. code-block:: bash salt '*' file.chgrp /etc/passwd root ''' path = os.path.expanduser(path) user = get_user(path) return chown(path, user, group)
[ "def", "chgrp", "(", "path", ",", "group", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "user", "=", "get_user", "(", "path", ")", "return", "chown", "(", "path", ",", "user", ",", "group", ")" ]
Change the group of a file path path to the file or directory group group owner CLI Example: .. code-block:: bash salt '*' file.chgrp /etc/passwd root
[ "Change", "the", "group", "of", "a", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L484-L503
train
saltstack/salt
salt/modules/file.py
_cmp_attrs
def _cmp_attrs(path, attrs): ''' .. versionadded:: 2018.3.0 Compare attributes of a given file to given attributes. Returns a pair (list) where the first item is the attributes to add and the second item those to be removed. Please take into account when using this function that some minions will not have lsattr installed. path path to file to compare attributes with. attrs string of attributes to compare against a given file ''' diff = [None, None] # lsattr for AIX is not the same thing as lsattr for linux. if salt.utils.platform.is_aix(): return None try: lattrs = lsattr(path).get(path, '') except AttributeError: # lsattr not installed return None old = [chr for chr in lattrs if chr not in attrs] if old: diff[1] = ''.join(old) new = [chr for chr in attrs if chr not in lattrs] if new: diff[0] = ''.join(new) return diff
python
def _cmp_attrs(path, attrs): ''' .. versionadded:: 2018.3.0 Compare attributes of a given file to given attributes. Returns a pair (list) where the first item is the attributes to add and the second item those to be removed. Please take into account when using this function that some minions will not have lsattr installed. path path to file to compare attributes with. attrs string of attributes to compare against a given file ''' diff = [None, None] # lsattr for AIX is not the same thing as lsattr for linux. if salt.utils.platform.is_aix(): return None try: lattrs = lsattr(path).get(path, '') except AttributeError: # lsattr not installed return None old = [chr for chr in lattrs if chr not in attrs] if old: diff[1] = ''.join(old) new = [chr for chr in attrs if chr not in lattrs] if new: diff[0] = ''.join(new) return diff
[ "def", "_cmp_attrs", "(", "path", ",", "attrs", ")", ":", "diff", "=", "[", "None", ",", "None", "]", "# lsattr for AIX is not the same thing as lsattr for linux.", "if", "salt", ".", "utils", ".", "platform", ".", "is_aix", "(", ")", ":", "return", "None", ...
.. versionadded:: 2018.3.0 Compare attributes of a given file to given attributes. Returns a pair (list) where the first item is the attributes to add and the second item those to be removed. Please take into account when using this function that some minions will not have lsattr installed. path path to file to compare attributes with. attrs string of attributes to compare against a given file
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L506-L543
train
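The character-set diff in isolation; lattrs is what the file currently has, attrs what is wanted, and both values are illustrative:

lattrs = 'ai'     # currently set on the file
attrs = 'aS'      # desired
to_remove = ''.join(c for c in lattrs if c not in attrs) or None
to_add = ''.join(c for c in attrs if c not in lattrs) or None
print([to_add, to_remove])   # ['S', 'i'], matching the [add, remove] pair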
saltstack/salt
salt/modules/file.py
lsattr
def lsattr(path): ''' .. versionadded:: 2018.3.0 .. versionchanged:: 2018.3.1 If ``lsattr`` is not installed on the system, ``None`` is returned. .. versionchanged:: 2018.3.4 If on ``AIX``, ``None`` is returned even if ``lsattr`` is present on the filesystem, as lsattr on ``AIX`` is not the same thing as the Linux version. Obtain the modifiable attributes of the given file. If path is to a directory, an empty list is returned. path path to file to obtain attributes of. File/directory must exist. CLI Example: .. code-block:: bash salt '*' file.lsattr foo1.txt ''' if not salt.utils.path.which('lsattr') or salt.utils.platform.is_aix(): return None if not os.path.exists(path): raise SaltInvocationError("File or directory does not exist: " + path) cmd = ['lsattr', path] result = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False) results = {} for line in result.splitlines(): if not line.startswith('lsattr: '): vals = line.split(None, 1) results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) return results
python
def lsattr(path): ''' .. versionadded:: 2018.3.0 .. versionchanged:: 2018.3.1 If ``lsattr`` is not installed on the system, ``None`` is returned. .. versionchanged:: 2018.3.4 If on ``AIX``, ``None`` is returned even if ``lsattr`` is present on the filesystem, as lsattr on ``AIX`` is not the same thing as the Linux version. Obtain the modifiable attributes of the given file. If path is to a directory, an empty list is returned. path path to file to obtain attributes of. File/directory must exist. CLI Example: .. code-block:: bash salt '*' file.lsattr foo1.txt ''' if not salt.utils.path.which('lsattr') or salt.utils.platform.is_aix(): return None if not os.path.exists(path): raise SaltInvocationError("File or directory does not exist: " + path) cmd = ['lsattr', path] result = __salt__['cmd.run'](cmd, ignore_retcode=True, python_shell=False) results = {} for line in result.splitlines(): if not line.startswith('lsattr: '): vals = line.split(None, 1) results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) return results
[ "def", "lsattr", "(", "path", ")", ":", "if", "not", "salt", ".", "utils", ".", "path", ".", "which", "(", "'lsattr'", ")", "or", "salt", ".", "utils", ".", "platform", ".", "is_aix", "(", ")", ":", "return", "None", "if", "not", "os", ".", "path...
.. versionadded:: 2018.3.0 .. versionchanged:: 2018.3.1 If ``lsattr`` is not installed on the system, ``None`` is returned. .. versionchanged:: 2018.3.4 If on ``AIX``, ``None`` is returned even if ``lsattr`` is present on the filesystem, as lsattr on ``AIX`` is not the same thing as the Linux version. Obtain the modifiable attributes of the given file. If path is to a directory, an empty list is returned. path path to file to obtain attributes of. File/directory must exist. CLI Example: .. code-block:: bash salt '*' file.lsattr foo1.txt
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0", "..", "versionchanged", "::", "2018", ".", "3", ".", "1", "If", "lsattr", "is", "not", "installed", "on", "the", "system", "None", "is", "returned", ".", "..", "versionchanged", "::", "2018", ".",...
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L546-L582
train
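Parsing a typical lsattr output line, sketched with a canned string instead of cmd.run:

import re

line = '----i---------e---- /etc/resolv.conf'
results = {}
if not line.startswith('lsattr: '):       # skip lsattr's own error lines
    flags, fname = line.split(None, 1)
    results[fname] = re.findall(r'[aAcCdDeijPsStTu]', flags)
print(results)   # {'/etc/resolv.conf': ['i', 'e']}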
saltstack/salt
salt/modules/file.py
chattr
def chattr(*files, **kwargs): ''' .. versionadded:: 2018.3.0 Change the attributes of files. This function accepts one or more files and the following options: operator Can be either ``add`` or ``remove``. Determines whether attributes should be added or removed from files attributes One or more of the following characters: ``aAcCdDeijPsStTu``, representing attributes to add to/remove from files version a version number to assign to the file(s) flags One or more of the following characters: ``RVf``, representing flags to assign to chattr (recurse, verbose, suppress most errors) CLI Example: .. code-block:: bash salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai salt '*' file.chattr foo3.txt operator=remove attributes=i version=2 ''' operator = kwargs.pop('operator', None) attributes = kwargs.pop('attributes', None) flags = kwargs.pop('flags', None) version = kwargs.pop('version', None) if (operator is None) or (operator not in ('add', 'remove')): raise SaltInvocationError( "Need an operator: 'add' or 'remove' to modify attributes.") if attributes is None: raise SaltInvocationError("Need attributes: [aAcCdDeijPsStTu]") cmd = ['chattr'] if operator == "add": attrs = '+{0}'.format(attributes) elif operator == "remove": attrs = '-{0}'.format(attributes) cmd.append(attrs) if flags is not None: cmd.append('-{0}'.format(flags)) if version is not None: cmd.extend(['-v', version]) cmd.extend(files) result = __salt__['cmd.run'](cmd, python_shell=False) if bool(result): raise CommandExecutionError( "chattr failed to run, possibly due to bad parameters.") return True
python
def chattr(*files, **kwargs): ''' .. versionadded:: 2018.3.0 Change the attributes of files. This function accepts one or more files and the following options: operator Can be either ``add`` or ``remove``. Determines whether attributes should be added or removed from files attributes One or more of the following characters: ``aAcCdDeijPsStTu``, representing attributes to add to/remove from files version a version number to assign to the file(s) flags One or more of the following characters: ``RVf``, representing flags to assign to chattr (recurse, verbose, suppress most errors) CLI Example: .. code-block:: bash salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai salt '*' file.chattr foo3.txt operator=remove attributes=i version=2 ''' operator = kwargs.pop('operator', None) attributes = kwargs.pop('attributes', None) flags = kwargs.pop('flags', None) version = kwargs.pop('version', None) if (operator is None) or (operator not in ('add', 'remove')): raise SaltInvocationError( "Need an operator: 'add' or 'remove' to modify attributes.") if attributes is None: raise SaltInvocationError("Need attributes: [aAcCdDeijPsStTu]") cmd = ['chattr'] if operator == "add": attrs = '+{0}'.format(attributes) elif operator == "remove": attrs = '-{0}'.format(attributes) cmd.append(attrs) if flags is not None: cmd.append('-{0}'.format(flags)) if version is not None: cmd.extend(['-v', version]) cmd.extend(files) result = __salt__['cmd.run'](cmd, python_shell=False) if bool(result): raise CommandExecutionError( "chattr failed to run, possibly due to bad parameters.") return True
[ "def", "chattr", "(", "*", "files", ",", "*", "*", "kwargs", ")", ":", "operator", "=", "kwargs", ".", "pop", "(", "'operator'", ",", "None", ")", "attributes", "=", "kwargs", ".", "pop", "(", "'attributes'", ",", "None", ")", "flags", "=", "kwargs",...
.. versionadded:: 2018.3.0 Change the attributes of files. This function accepts one or more files and the following options: operator Can be either ``add`` or ``remove``. Determines whether attributes should be added or removed from files attributes One or more of the following characters: ``aAcCdDeijPsStTu``, representing attributes to add to/remove from files version a version number to assign to the file(s) flags One or more of the following characters: ``RVf``, representing flags to assign to chattr (recurse, verbose, suppress most errors) CLI Example: .. code-block:: bash salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L585-L648
train
saltstack/salt
salt/modules/file.py
get_sum
def get_sum(path, form='sha256'): ''' Return the checksum for the given file. The following checksum algorithms are supported: * md5 * sha1 * sha224 * sha256 **(default)** * sha384 * sha512 path path to the file or directory form desired sum format CLI Example: .. code-block:: bash salt '*' file.get_sum /etc/passwd sha512 ''' path = os.path.expanduser(path) if not os.path.isfile(path): return 'File not found' return salt.utils.hashutils.get_hash(path, form, 4096)
python
def get_sum(path, form='sha256'): ''' Return the checksum for the given file. The following checksum algorithms are supported: * md5 * sha1 * sha224 * sha256 **(default)** * sha384 * sha512 path path to the file or directory form desired sum format CLI Example: .. code-block:: bash salt '*' file.get_sum /etc/passwd sha512 ''' path = os.path.expanduser(path) if not os.path.isfile(path): return 'File not found' return salt.utils.hashutils.get_hash(path, form, 4096)
[ "def", "get_sum", "(", "path", ",", "form", "=", "'sha256'", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "'File not found'", "return", ...
Return the checksum for the given file. The following checksum algorithms are supported: * md5 * sha1 * sha224 * sha256 **(default)** * sha384 * sha512 path path to the file or directory form desired sum format CLI Example: .. code-block:: bash salt '*' file.get_sum /etc/passwd sha512
[ "Return", "the", "checksum", "for", "the", "given", "file", ".", "The", "following", "checksum", "algorithms", "are", "supported", ":" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L651-L679
train
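get_sum above delegates the actual work to salt.utils.hashutils.get_hash with a 4096-byte chunk size (the get_hash record below defaults to 65536). Assuming that helper hashes the file incrementally, as the get_hash docstring implies, a stdlib-only sketch of the idea (chunked_hash is a hypothetical name):

import hashlib

def chunked_hash(path, form='sha256', chunk_size=4096):
    """Hash a file in fixed-size chunks so the whole file is never
    held in memory at once."""
    hasher = hashlib.new(form)  # raises ValueError for unsupported forms
    with open(path, 'rb') as fp:
        # iter(callable, sentinel) keeps reading until fp.read() returns b''
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()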
saltstack/salt
salt/modules/file.py
get_hash
def get_hash(path, form='sha256', chunk_size=65536): ''' Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'`` path path to the file or directory form desired sum format chunk_size amount to sum at once CLI Example: .. code-block:: bash salt '*' file.get_hash /etc/shadow ''' return salt.utils.hashutils.get_hash(os.path.expanduser(path), form, chunk_size)
python
def get_hash(path, form='sha256', chunk_size=65536): ''' Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'`` path path to the file or directory form desired sum format chunk_size amount to sum at once CLI Example: .. code-block:: bash salt '*' file.get_hash /etc/shadow ''' return salt.utils.hashutils.get_hash(os.path.expanduser(path), form, chunk_size)
[ "def", "get_hash", "(", "path", ",", "form", "=", "'sha256'", ",", "chunk_size", "=", "65536", ")", ":", "return", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "form", ",",...
Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'`` path path to the file or directory form desired sum format chunk_size amount to sum at once CLI Example: .. code-block:: bash salt '*' file.get_hash /etc/shadow
[ "Get", "the", "hash", "sum", "of", "a", "file" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L682-L707
train
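The get_hash docstring above warns that get_sum reports errors as ordinary strings. A small self-contained sketch of why such return values are hard to trust in a digest comparison; get_sum_style is a hypothetical stand-in for the pattern, not Salt's code:

import hashlib
import os

def get_sum_style(path, form='sha256'):
    # Mimics the get_sum record: errors come back as plain strings,
    # and the whole file is read into memory before hashing.
    if not os.path.isfile(path):
        return 'File not found'
    with open(path, 'rb') as fp:
        return hashlib.new(form, fp.read()).hexdigest()

# A caller comparing the return value against an expected digest cannot
# distinguish "wrong hash" from "no such file" without string-sniffing:
print(get_sum_style('/no/such/file') == 'File not found')  # True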
saltstack/salt
salt/modules/file.py
get_source_sum
def get_source_sum(file_name='', source='', source_hash=None, source_hash_name=None, saltenv='base'): ''' .. versionadded:: 2016.11.0 Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to obtain the hash and hash type from the parameters specified below. file_name Optional file name being managed, for matching with :py:func:`file.extract_hash <salt.modules.file.extract_hash>`. source Source file, as used in :py:mod:`file <salt.states.file>` and other states. If ``source_hash`` refers to a file containing hashes, then this filename will be used to match a filename in that file. If the ``source_hash`` is a hash expression, then this argument will be ignored. source_hash Hash file/expression, as used in :py:mod:`file <salt.states.file>` and other states. If this value refers to a remote URL or absolute path to a local file, it will be cached and :py:func:`file.extract_hash <salt.modules.file.extract_hash>` will be used to obtain a hash from it. source_hash_name Specific file name to look for when ``source_hash`` refers to a remote file, used to disambiguate ambiguous matches. saltenv : base Salt fileserver environment from which to retrieve the source_hash. This value will only be used when ``source_hash`` refers to a file on the Salt fileserver (i.e. one beginning with ``salt://``). CLI Example: .. code-block:: bash salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz ''' def _invalid_source_hash_format(): ''' DRY helper for reporting invalid source_hash input ''' raise CommandExecutionError( 'Source hash {0} format is invalid. The supported formats are: ' '1) a hash, 2) an expression in the format <hash_type>=<hash>, or ' '3) either a path to a local file containing hashes, or a URI of ' 'a remote hash file. Supported protocols for remote hash files ' 'are: {1}. The hash may also not be of a valid length, the ' 'following are supported hash types and lengths: {2}.'.format( source_hash, ', '.join(salt.utils.files.VALID_PROTOS), ', '.join( ['{0} ({1})'.format(HASHES_REVMAP[x], x) for x in sorted(HASHES_REVMAP)] ), ) ) hash_fn = None if os.path.isabs(source_hash): hash_fn = source_hash else: try: proto = _urlparse(source_hash).scheme if proto in salt.utils.files.VALID_PROTOS: hash_fn = __salt__['cp.cache_file'](source_hash, saltenv) if not hash_fn: raise CommandExecutionError( 'Source hash file {0} not found'.format(source_hash) ) else: if proto != '': # Some unsupported protocol (e.g. foo://) is being used. # We'll get into this else block if a hash expression # (like md5=<md5 checksum here>) was used, but in those cases, # the protocol will be an empty string, in which case we avoid # this error condition. _invalid_source_hash_format() except (AttributeError, TypeError): _invalid_source_hash_format() if hash_fn is not None: ret = extract_hash(hash_fn, '', file_name, source, source_hash_name) if ret is None: _invalid_source_hash_format() ret['hsum'] = ret['hsum'].lower() return ret else: # The source_hash is a hash expression ret = {} try: ret['hash_type'], ret['hsum'] = \ [x.strip() for x in source_hash.split('=', 1)] except AttributeError: _invalid_source_hash_format() except ValueError: # No hash type, try to figure out by hash length if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash): _invalid_source_hash_format() ret['hsum'] = source_hash source_hash_len = len(source_hash) if source_hash_len in HASHES_REVMAP: ret['hash_type'] = HASHES_REVMAP[source_hash_len] else: _invalid_source_hash_format() if ret['hash_type'] not in HASHES: raise CommandExecutionError( 'Invalid hash type \'{0}\'. Supported hash types are: {1}. ' 'Either remove the hash type and simply use \'{2}\' as the ' 'source_hash, or change the hash type to a supported type.' .format(ret['hash_type'], ', '.join(HASHES), ret['hsum']) ) else: hsum_len = len(ret['hsum']) if hsum_len not in HASHES_REVMAP: _invalid_source_hash_format() elif hsum_len != HASHES[ret['hash_type']]: raise CommandExecutionError( 'Invalid length ({0}) for hash type \'{1}\'. Either ' 'remove the hash type and simply use \'{2}\' as the ' 'source_hash, or change the hash type to \'{3}\''.format( hsum_len, ret['hash_type'], ret['hsum'], HASHES_REVMAP[hsum_len], ) ) ret['hsum'] = ret['hsum'].lower() return ret
python
def get_source_sum(file_name='', source='', source_hash=None, source_hash_name=None, saltenv='base'): ''' .. versionadded:: 2016.11.0 Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to obtain the hash and hash type from the parameters specified below. file_name Optional file name being managed, for matching with :py:func:`file.extract_hash <salt.modules.file.extract_hash>`. source Source file, as used in :py:mod:`file <salt.states.file>` and other states. If ``source_hash`` refers to a file containing hashes, then this filename will be used to match a filename in that file. If the ``source_hash`` is a hash expression, then this argument will be ignored. source_hash Hash file/expression, as used in :py:mod:`file <salt.states.file>` and other states. If this value refers to a remote URL or absolute path to a local file, it will be cached and :py:func:`file.extract_hash <salt.modules.file.extract_hash>` will be used to obtain a hash from it. source_hash_name Specific file name to look for when ``source_hash`` refers to a remote file, used to disambiguate ambiguous matches. saltenv : base Salt fileserver environment from which to retrieve the source_hash. This value will only be used when ``source_hash`` refers to a file on the Salt fileserver (i.e. one beginning with ``salt://``). CLI Example: .. code-block:: bash salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz ''' def _invalid_source_hash_format(): ''' DRY helper for reporting invalid source_hash input ''' raise CommandExecutionError( 'Source hash {0} format is invalid. The supported formats are: ' '1) a hash, 2) an expression in the format <hash_type>=<hash>, or ' '3) either a path to a local file containing hashes, or a URI of ' 'a remote hash file. Supported protocols for remote hash files ' 'are: {1}. The hash may also not be of a valid length, the ' 'following are supported hash types and lengths: {2}.'.format( source_hash, ', '.join(salt.utils.files.VALID_PROTOS), ', '.join( ['{0} ({1})'.format(HASHES_REVMAP[x], x) for x in sorted(HASHES_REVMAP)] ), ) ) hash_fn = None if os.path.isabs(source_hash): hash_fn = source_hash else: try: proto = _urlparse(source_hash).scheme if proto in salt.utils.files.VALID_PROTOS: hash_fn = __salt__['cp.cache_file'](source_hash, saltenv) if not hash_fn: raise CommandExecutionError( 'Source hash file {0} not found'.format(source_hash) ) else: if proto != '': # Some unsupported protocol (e.g. foo://) is being used. # We'll get into this else block if a hash expression # (like md5=<md5 checksum here>) was used, but in those cases, # the protocol will be an empty string, in which case we avoid # this error condition. _invalid_source_hash_format() except (AttributeError, TypeError): _invalid_source_hash_format() if hash_fn is not None: ret = extract_hash(hash_fn, '', file_name, source, source_hash_name) if ret is None: _invalid_source_hash_format() ret['hsum'] = ret['hsum'].lower() return ret else: # The source_hash is a hash expression ret = {} try: ret['hash_type'], ret['hsum'] = \ [x.strip() for x in source_hash.split('=', 1)] except AttributeError: _invalid_source_hash_format() except ValueError: # No hash type, try to figure out by hash length if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash): _invalid_source_hash_format() ret['hsum'] = source_hash source_hash_len = len(source_hash) if source_hash_len in HASHES_REVMAP: ret['hash_type'] = HASHES_REVMAP[source_hash_len] else: _invalid_source_hash_format() if ret['hash_type'] not in HASHES: raise CommandExecutionError( 'Invalid hash type \'{0}\'. Supported hash types are: {1}. ' 'Either remove the hash type and simply use \'{2}\' as the ' 'source_hash, or change the hash type to a supported type.' .format(ret['hash_type'], ', '.join(HASHES), ret['hsum']) ) else: hsum_len = len(ret['hsum']) if hsum_len not in HASHES_REVMAP: _invalid_source_hash_format() elif hsum_len != HASHES[ret['hash_type']]: raise CommandExecutionError( 'Invalid length ({0}) for hash type \'{1}\'. Either ' 'remove the hash type and simply use \'{2}\' as the ' 'source_hash, or change the hash type to \'{3}\''.format( hsum_len, ret['hash_type'], ret['hsum'], HASHES_REVMAP[hsum_len], ) ) ret['hsum'] = ret['hsum'].lower() return ret
[ "def", "get_source_sum", "(", "file_name", "=", "''", ",", "source", "=", "''", ",", "source_hash", "=", "None", ",", "source_hash_name", "=", "None", ",", "saltenv", "=", "'base'", ")", ":", "def", "_invalid_source_hash_format", "(", ")", ":", "'''\n ...
.. versionadded:: 2016.11.0 Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to obtain the hash and hash type from the parameters specified below. file_name Optional file name being managed, for matching with :py:func:`file.extract_hash <salt.modules.file.extract_hash>`. source Source file, as used in :py:mod:`file <salt.states.file>` and other states. If ``source_hash`` refers to a file containing hashes, then this filename will be used to match a filename in that file. If the ``source_hash`` is a hash expression, then this argument will be ignored. source_hash Hash file/expression, as used in :py:mod:`file <salt.states.file>` and other states. If this value refers to a remote URL or absolute path to a local file, it will be cached and :py:func:`file.extract_hash <salt.modules.file.extract_hash>` will be used to obtain a hash from it. source_hash_name Specific file name to look for when ``source_hash`` refers to a remote file, used to disambiguate ambiguous matches. saltenv : base Salt fileserver environment from which to retrieve the source_hash. This value will only be used when ``source_hash`` refers to a file on the Salt fileserver (i.e. one beginning with ``salt://``). CLI Example: .. code-block:: bash salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
[ "..", "versionadded", "::", "2016", ".", "11", ".", "0" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L710-L848
train
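Both get_source_sum above and check_hash below resolve a bare digest to a hash type by its length via HASHES and HASHES_REVMAP, which are defined earlier in salt/modules/file.py and are not part of this excerpt. A plausible reconstruction of that lookup follows; the exact upstream definitions may differ:

import re
import string

# Hex-digest lengths for the hash types these functions support (assumed)
HASHES = {'md5': 32, 'sha1': 40, 'sha224': 56, 'sha256': 64,
          'sha384': 96, 'sha512': 128}
HASHES_REVMAP = {length: name for name, length in HASHES.items()}

def infer_hash_type(hsum):
    """Guess the hash type from a bare digest by its length, as the
    ValueError branch of get_source_sum does when no '<type>=' prefix
    is present."""
    if not re.match('^[{0}]+$'.format(string.hexdigits), hsum):
        raise ValueError('not a hex digest: {0}'.format(hsum))
    try:
        return HASHES_REVMAP[len(hsum)]
    except KeyError:
        raise ValueError('unsupported digest length: {0}'.format(len(hsum)))

print(infer_hash_type('499ae16dcae71eeb7c3a30c75ea7a1a6'))  # -> md5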
saltstack/salt
salt/modules/file.py
check_hash
def check_hash(path, file_hash): ''' Check if a file matches the given hash string Returns ``True`` if the hash matches, otherwise ``False``. path Path to a file local to the minion. hash The hash to check against the file specified in the ``path`` argument. .. versionchanged:: 2016.11.4 For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type in the format ``<hash_type>=<hash_value>`` (e.g. ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22 ''' path = os.path.expanduser(path) if not isinstance(file_hash, six.string_types): raise SaltInvocationError('hash must be a string') for sep in (':', '='): if sep in file_hash: hash_type, hash_value = file_hash.split(sep, 1) break else: hash_value = file_hash hash_len = len(file_hash) hash_type = HASHES_REVMAP.get(hash_len) if hash_type is None: raise SaltInvocationError( 'Hash {0} (length: {1}) could not be matched to a supported ' 'hash type. The supported hash types and lengths are: ' '{2}'.format( file_hash, hash_len, ', '.join( ['{0} ({1})'.format(HASHES_REVMAP[x], x) for x in sorted(HASHES_REVMAP)] ), ) ) return get_hash(path, hash_type) == hash_value
python
def check_hash(path, file_hash): ''' Check if a file matches the given hash string Returns ``True`` if the hash matches, otherwise ``False``. path Path to a file local to the minion. hash The hash to check against the file specified in the ``path`` argument. .. versionchanged:: 2016.11.4 For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type in the format ``<hash_type>=<hash_value>`` (e.g. ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22 ''' path = os.path.expanduser(path) if not isinstance(file_hash, six.string_types): raise SaltInvocationError('hash must be a string') for sep in (':', '='): if sep in file_hash: hash_type, hash_value = file_hash.split(sep, 1) break else: hash_value = file_hash hash_len = len(file_hash) hash_type = HASHES_REVMAP.get(hash_len) if hash_type is None: raise SaltInvocationError( 'Hash {0} (length: {1}) could not be matched to a supported ' 'hash type. The supported hash types and lengths are: ' '{2}'.format( file_hash, hash_len, ', '.join( ['{0} ({1})'.format(HASHES_REVMAP[x], x) for x in sorted(HASHES_REVMAP)] ), ) ) return get_hash(path, hash_type) == hash_value
[ "def", "check_hash", "(", "path", ",", "file_hash", ")", ":", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "if", "not", "isinstance", "(", "file_hash", ",", "six", ".", "string_types", ")", ":", "raise", "SaltInvocationError", "("...
Check if a file matches the given hash string Returns ``True`` if the hash matches, otherwise ``False``. path Path to a file local to the minion. hash The hash to check against the file specified in the ``path`` argument. .. versionchanged:: 2016.11.4 For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type in the format ``<hash_type>=<hash_value>`` (e.g. ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
[ "Check", "if", "a", "file", "matches", "the", "given", "hash", "string" ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L851-L905
train
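check_hash above parses '<hash_type>=<hash_value>' (or ':'-separated) expressions with a for/else loop, an idiom worth isolating. A standalone sketch of just that parsing step; split_hash_expr is a hypothetical name:

def split_hash_expr(file_hash):
    """Split '<type>:<digest>' / '<type>=<digest>' the way check_hash does.
    Returns (hash_type, digest); hash_type is None for a bare digest, which
    check_hash then resolves by length via HASHES_REVMAP."""
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    else:
        # Python's for/else: this branch runs only when the loop finished
        # without break, i.e. neither separator was present.
        hash_type, hash_value = None, file_hash
    return hash_type, hash_value

print(split_hash_expr('md5=e138491e9d5b97023cea823fe17bac22'))
print(split_hash_expr('e138491e9d5b97023cea823fe17bac22'))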
saltstack/salt
salt/modules/file.py
find
def find(path, *args, **kwargs): ''' Approximate the Unix ``find(1)`` command and return a list of paths that meet the specified criteria. The options include match criteria: .. code-block:: text name = path-glob # case sensitive iname = path-glob # case insensitive regex = path-regex # case sensitive iregex = path-regex # case insensitive type = file-types # match any listed type user = users # match any listed user group = groups # match any listed group size = [+-]number[size-unit] # default unit = byte mtime = interval # modified since date grep = regex # search file contents and/or actions: .. code-block:: text delete [= file-types] # default type = 'f' exec = command [arg ...] # where {} is replaced by pathname print [= print-opts] and/or depth criteria: .. code-block:: text maxdepth = maximum depth to traverse in path mindepth = minimum depth to traverse before checking files or directories The default action is ``print=path`` ``path-glob``: .. code-block:: text * = match zero or more chars ? = match any char [abc] = match a, b, or c [!abc] or [^abc] = match anything except a, b, and c [x-y] = match chars x through y [!x-y] or [^x-y] = match anything except chars x through y {a,b,c} = match a or b or c ``path-regex``: a Python Regex (regular expression) pattern to match pathnames ``file-types``: a string of one or more of the following: .. code-block:: text a: all file types b: block device c: character device d: directory p: FIFO (named pipe) f: plain file l: symlink s: socket ``users``: a space and/or comma separated list of user names and/or uids ``groups``: a space and/or comma separated list of group names and/or gids ``size-unit``: .. code-block:: text b: bytes k: kilobytes m: megabytes g: gigabytes t: terabytes interval: .. code-block:: text [<num>w] [<num>d] [<num>h] [<num>m] [<num>s] where: w: week d: day h: hour m: minute s: second print-opts: a comma and/or space separated list of one or more of the following: .. code-block:: text group: group name md5: MD5 digest of file contents mode: file permissions (as integer) mtime: last modification time (as time_t) name: file basename path: file absolute path size: file size in bytes type: file type user: user name CLI Examples: .. code-block:: bash salt '*' file.find / type=f name=\\*.bak size=+10m salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete ''' if 'delete' in args: kwargs['delete'] = 'f' elif 'print' in args: kwargs['print'] = 'path' try: finder = salt.utils.find.Finder(kwargs) except ValueError as ex: return 'error: {0}'.format(ex) ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i] ret.sort() return ret
python
def find(path, *args, **kwargs): ''' Approximate the Unix ``find(1)`` command and return a list of paths that meet the specified criteria. The options include match criteria: .. code-block:: text name = path-glob # case sensitive iname = path-glob # case insensitive regex = path-regex # case sensitive iregex = path-regex # case insensitive type = file-types # match any listed type user = users # match any listed user group = groups # match any listed group size = [+-]number[size-unit] # default unit = byte mtime = interval # modified since date grep = regex # search file contents and/or actions: .. code-block:: text delete [= file-types] # default type = 'f' exec = command [arg ...] # where {} is replaced by pathname print [= print-opts] and/or depth criteria: .. code-block:: text maxdepth = maximum depth to traverse in path mindepth = minimum depth to traverse before checking files or directories The default action is ``print=path`` ``path-glob``: .. code-block:: text * = match zero or more chars ? = match any char [abc] = match a, b, or c [!abc] or [^abc] = match anything except a, b, and c [x-y] = match chars x through y [!x-y] or [^x-y] = match anything except chars x through y {a,b,c} = match a or b or c ``path-regex``: a Python Regex (regular expression) pattern to match pathnames ``file-types``: a string of one or more of the following: .. code-block:: text a: all file types b: block device c: character device d: directory p: FIFO (named pipe) f: plain file l: symlink s: socket ``users``: a space and/or comma separated list of user names and/or uids ``groups``: a space and/or comma separated list of group names and/or gids ``size-unit``: .. code-block:: text b: bytes k: kilobytes m: megabytes g: gigabytes t: terabytes interval: .. code-block:: text [<num>w] [<num>d] [<num>h] [<num>m] [<num>s] where: w: week d: day h: hour m: minute s: second print-opts: a comma and/or space separated list of one or more of the following: .. code-block:: text group: group name md5: MD5 digest of file contents mode: file permissions (as integer) mtime: last modification time (as time_t) name: file basename path: file absolute path size: file size in bytes type: file type user: user name CLI Examples: .. code-block:: bash salt '*' file.find / type=f name=\\*.bak size=+10m salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete ''' if 'delete' in args: kwargs['delete'] = 'f' elif 'print' in args: kwargs['print'] = 'path' try: finder = salt.utils.find.Finder(kwargs) except ValueError as ex: return 'error: {0}'.format(ex) ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i] ret.sort() return ret
[ "def", "find", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'delete'", "in", "args", ":", "kwargs", "[", "'delete'", "]", "=", "'f'", "elif", "'print'", "in", "args", ":", "kwargs", "[", "'print'", "]", "=", "'path'", ...
Approximate the Unix ``find(1)`` command and return a list of paths that meet the specified criteria. The options include match criteria: .. code-block:: text name = path-glob # case sensitive iname = path-glob # case insensitive regex = path-regex # case sensitive iregex = path-regex # case insensitive type = file-types # match any listed type user = users # match any listed user group = groups # match any listed group size = [+-]number[size-unit] # default unit = byte mtime = interval # modified since date grep = regex # search file contents and/or actions: .. code-block:: text delete [= file-types] # default type = 'f' exec = command [arg ...] # where {} is replaced by pathname print [= print-opts] and/or depth criteria: .. code-block:: text maxdepth = maximum depth to traverse in path mindepth = minimum depth to traverse before checking files or directories The default action is ``print=path`` ``path-glob``: .. code-block:: text * = match zero or more chars ? = match any char [abc] = match a, b, or c [!abc] or [^abc] = match anything except a, b, and c [x-y] = match chars x through y [!x-y] or [^x-y] = match anything except chars x through y {a,b,c} = match a or b or c ``path-regex``: a Python Regex (regular expression) pattern to match pathnames ``file-types``: a string of one or more of the following: .. code-block:: text a: all file types b: block device c: character device d: directory p: FIFO (named pipe) f: plain file l: symlink s: socket ``users``: a space and/or comma separated list of user names and/or uids ``groups``: a space and/or comma separated list of group names and/or gids ``size-unit``: .. code-block:: text b: bytes k: kilobytes m: megabytes g: gigabytes t: terabytes interval: .. code-block:: text [<num>w] [<num>d] [<num>h] [<num>m] [<num>s] where: w: week d: day h: hour m: minute s: second print-opts: a comma and/or space separated list of one or more of the following: .. code-block:: text group: group name md5: MD5 digest of file contents mode: file permissions (as integer) mtime: last modification time (as time_t) name: file basename path: file absolute path size: file size in bytes type: file type user: user name CLI Examples: .. code-block:: bash salt '*' file.find / type=f name=\\*.bak size=+10m salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
[ "Approximate", "the", "Unix", "find", "(", "1", ")", "command", "and", "return", "a", "list", "of", "paths", "that", "meet", "the", "specified", "criteria", "." ]
e8541fd6e744ab0df786c0f76102e41631f45d46
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L908-L1034
train
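find above hands all matching and actions to salt.utils.find.Finder, which is not shown in this excerpt. As a rough illustration only, here is a toy stdlib approximation of the name and maxdepth criteria; mini_find is hypothetical and far less capable than the real Finder (no size, mtime, grep, type, or delete/exec support):

import fnmatch
import os

def mini_find(path, name='*', maxdepth=None):
    """Toy approximation of file.find's name/maxdepth criteria."""
    matches = []
    root_depth = path.rstrip(os.sep).count(os.sep)
    for dirpath, dirnames, filenames in os.walk(path):
        depth = dirpath.rstrip(os.sep).count(os.sep) - root_depth
        if maxdepth is not None and depth >= maxdepth:
            dirnames[:] = []  # prune traversal below maxdepth in place
        for fname in filenames:
            if fnmatch.fnmatch(fname, name):
                matches.append(os.path.join(dirpath, fname))
    return sorted(matches)

# Example: mini_find('/var/log', name='*.log', maxdepth=1)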