idx
int64
0
41.8k
question
stringlengths
69
3.84k
target
stringlengths
11
1.18k
100
// NewParentLogger creates a new parent logger that uses the given name as
// its prefix and the given factory to build the underlying Logger.
func NewParentLogger(name string, factory LoggerFactory) *ParentLogger {
	logger := factory.NewLogger(name)
	return &ParentLogger{
		Logger:  logger,
		Prefix:  name,
		Factory: factory,
	}
}
NewParentLogger creates new parent logger with given LoggerFactory and name as prefix .
101
func ( p * ParentLogger ) NewLogger ( name string ) Logger { factory := p . Factory if factory == nil { factory = DefaultRegistry } return factory . NewLogger ( fmt . Sprintf ( "%s.%s" , p . Prefix , name ) ) }
NewLogger returns logger using name prefixed with prefix defined in parent logger . If Factory is nil DefaultRegistry is used .
102
func ( p * Plugin ) AfterInit ( ) error { if p . disabled { p . Log . Debugf ( "kafka plugin disabled, skipping AfterInit" ) return nil } if p . mux != nil { err := p . mux . Start ( ) if err != nil { return err } } if p . StatusCheck != nil { p . StatusCheck . Register ( p . PluginName , func ( ) ( statu...
AfterInit is called in the second phase of the initialization . The kafka multiplexer is started ; all consumers have to be subscribed before this phase .
103
// Close is called at plugin cleanup phase; it safely closes the hash-sync
// client, the manual client and the multiplexer in that order.
func (p *Plugin) Close() error {
	return safeclose.Close(p.hsClient, p.manClient, p.mux)
}
Close is called at plugin cleanup phase .
104
// NewSyncPublisher creates a publisher for synchronous publishing. It opens
// a new proto connection on the multiplexer with the default partitioner.
func (p *Plugin) NewSyncPublisher(connectionName string, topic string) (messaging.ProtoPublisher, error) {
	conn := p.NewProtoConnection(connectionName)
	return conn.NewSyncPublisher(topic)
}
NewSyncPublisher creates a publisher that allows to publish messages using synchronous API . The publisher creates new proto connection on multiplexer with default partitioner .
105
// NewSyncPublisherToPartition creates a publisher that publishes to a custom
// partition via the synchronous API, using a proto connection with the
// manual partitioner.
func (p *Plugin) NewSyncPublisherToPartition(connectionName string, topic string, partition int32) (messaging.ProtoPublisher, error) {
	conn := p.NewProtoManualConnection(connectionName)
	return conn.NewSyncPublisherToPartition(topic, partition)
}
NewSyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using synchronous API . The publisher creates new proto connection on multiplexer with manual partitioner .
106
func ( p * Plugin ) NewAsyncPublisher ( connectionName string , topic string , successClb func ( messaging . ProtoMessage ) , errorClb func ( messaging . ProtoMessageErr ) ) ( messaging . ProtoPublisher , error ) { return p . NewProtoConnection ( connectionName ) . NewAsyncPublisher ( topic , successClb , errorClb ) ...
NewAsyncPublisher creates a publisher that allows to publish messages using asynchronous API . The publisher creates new proto connection on multiplexer with default partitioner .
107
func ( p * Plugin ) NewAsyncPublisherToPartition ( connectionName string , topic string , partition int32 , successClb func ( messaging . ProtoMessage ) , errorClb func ( messaging . ProtoMessageErr ) ) ( messaging . ProtoPublisher , error ) { return p . NewProtoManualConnection ( connectionName ) . NewAsyncPublisherTo...
NewAsyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using asynchronous API . The publisher creates new proto connection on multiplexer with manual partitioner .
108
func ( p * Plugin ) getClientConfig ( config * mux . Config , logger logging . Logger , topic string ) ( * client . Config , error ) { clientCfg := client . NewConfig ( logger ) if len ( config . Addrs ) > 0 { clientCfg . SetBrokers ( config . Addrs ... ) } else { clientCfg . SetBrokers ( mux . DefAddress ) } i...
Receive client config according to kafka config data
109
// NewKeyVal creates a new KeyVal from the key, its lazy value and revision.
func NewKeyVal(key string, value datasync.LazyValue, rev int64) *KeyVal {
	kv := &KeyVal{key, value, rev}
	return kv
}
NewKeyVal creates a new instance of KeyVal .
110
// NewKeyValBytes creates a new KeyValBytes from the key, raw value bytes
// and revision.
func NewKeyValBytes(key string, value []byte, rev int64) *KeyValBytes {
	kv := &KeyValBytes{key, value, rev}
	return kv
}
NewKeyValBytes creates a new instance of KeyValBytes .
111
func ( fsh * Handler ) CreateFile ( file string ) error { path , _ := filepath . Split ( file ) if path != "" { if err := os . MkdirAll ( path , os . ModePerm ) ; err != nil { return errors . Errorf ( "failed to create path for file %s: %v" , file , err ) } } sf , err := os . Create ( file ) if err != nil { r...
CreateFile is an implementation of the file system API interface
112
// ReadFile reads the whole named file and returns its contents; part of the
// file system API implementation.
func (fsh *Handler) ReadFile(file string) ([]byte, error) {
	data, err := ioutil.ReadFile(file)
	return data, err
}
ReadFile is an implementation of the file system API interface
113
func ( fsh * Handler ) WriteFile ( file string , data [ ] byte ) error { fileObj , err := os . OpenFile ( file , os . O_TRUNC | os . O_WRONLY , os . ModePerm ) if err != nil { return fmt . Errorf ( "failed to open status file %s for writing: %v" , file , err ) } defer fileObj . Close ( ) if _ , err := fileObj ....
WriteFile is an implementation of the file system API interface
114
func ( fsh * Handler ) FileExists ( file string ) bool { if _ , err := os . Stat ( file ) ; os . IsNotExist ( err ) { return false } return true }
FileExists is an implementation of the file system API interface
115
func ( fsh * Handler ) GetFileNames ( paths [ ] string ) ( files [ ] string , err error ) { for _ , path := range paths { err := filepath . Walk ( path , func ( path string , info os . FileInfo , err error ) error { if err != nil { return err } if info == nil || info . IsDir ( ) { return nil } files = append ( ...
GetFileNames is an implementation of the file system API interface
116
func ( fsh * Handler ) Watch ( paths [ ] string , onEvent func ( event fsnotify . Event ) , onClose func ( ) ) error { var err error fsh . watcher , err = fsnotify . NewWatcher ( ) if err != nil { return errors . Errorf ( "failed to init fileDB file system watcher: %v" , err ) } for _ , path := range paths { fs...
Watch starts new filesystem notification watcher . All events from files are passed to onEvent function . Function onClose is called when event channel is closed .
117
func ( fsh * Handler ) getFilesInPath ( files [ ] string , path string ) error { pathInfo , err := os . Stat ( path ) if err != nil { return errors . Errorf ( "failed to read path %s: %v" , path , err ) } if pathInfo . IsDir ( ) { pathList , err := ioutil . ReadDir ( path ) if err != nil { return errors . Error...
Processes given path . If the target is a file it is stored in the file list . If the target is a directory function is called recursively on nested paths in order to process the whole tree .
118
func Mock ( t * testing . T ) * KafkaMock { asyncP , aMock := client . GetAsyncProducerMock ( t ) syncP , sMock := client . GetSyncProducerMock ( t ) producers := multiplexerProducers { syncP , syncP , asyncP , asyncP , } return & KafkaMock { NewMultiplexer ( getMockConsumerFactory ( t ) , producers , & client . ...
Mock returns mock of Multiplexer that can be used for testing purposes .
119
func ( p * HelloUniverse ) RegisterWorld ( name string , size int ) { p . worlds [ name ] = size log . Printf ( "World %s (size %d) was registered" , name , size ) }
RegisterWorld is exported for other plugins to use
120
func safeClose ( obj interface { } ) error { defer func ( ) { if r := recover ( ) ; r != nil { logrus . DefaultLogger ( ) . Error ( "Recovered in safeclose: " , r ) } } ( ) type closerWithoutErr interface { Close ( ) } if val := reflect . ValueOf ( obj ) ; val . IsValid ( ) && ! val . IsNil ( ) { if closer , ...
safeClose closes closable object .
121
func Close ( objs ... interface { } ) error { errs := make ( [ ] error , len ( objs ) ) for i , obj := range objs { errs [ i ] = safeClose ( obj ) } for _ , err := range errs { if err != nil { return CloseErrors ( errs ) } } return nil }
Close tries to close all objects and return all errors using CloseErrors if there are any .
122
func main ( ) { var debug bool var redisConfigPath string flag . BoolVar ( & debug , "debug" , false , "Enable debugging" ) flag . StringVar ( & redisConfigPath , "redis-config" , "" , "Redis configuration file path" ) flag . Parse ( ) log := logrus . DefaultLogger ( ) if debug { log . SetLevel ( logging . ...
Initialize airport and start serving
123
func ( a * Airport ) init ( config interface { } , doneChan chan struct { } ) ( err error ) { a . log . Info ( "Airport redis example. If you need more info about what is happening, run example with -debug=true" ) rand . Seed ( time . Now ( ) . UnixNano ( ) ) printHeaders ( ) setupFlightStatusFormat ( ) a . cli...
Sets up all required brokers and watchers and prepares the redis connection .
124
func ( a * Airport ) processArrivals ( ) { for { arrival , ok := <- a . arrivalChan if ! ok { a . log . Errorf ( "arrival channel closed" ) return } switch arrival . GetChangeType ( ) { case datasync . Put : fl := flight . Info { } if err := arrival . GetValue ( & fl ) ; err != nil { a . log . Errorf ( "faile...
Wait for arrivals . Incoming flights are set to arrival status and sent to runway .
125
func ( a * Airport ) processDepartures ( ) { for { departure , ok := <- a . departureChan if ! ok { a . log . Errorf ( "departure channel closed" ) return } switch departure . GetChangeType ( ) { case datasync . Put : fl := flight . Info { } if err := departure . GetValue ( & fl ) ; err != nil { a . log . Err...
Wait for departures . Outgoing flights are set to departure and sent to runway
126
func ( a * Airport ) processHangar ( ) { for { hangar , ok := <- a . hangarChan if ! ok { a . log . Errorf ( "hangar channel closed" ) return } switch hangar . GetChangeType ( ) { case datasync . Put : a . log . Debugf ( "hangar %s updated" , hangar . GetKey ( ) ) case datasync . Delete : fl := flight . Info ...
Wait for hangar . Incoming flights are stored outgoing are sent to departure .
127
// Done currently only logs a non-nil error; no other processing happens yet.
func (ev *ChangeEvent) Done(err error) {
	if err == nil {
		return
	}
	logrus.DefaultLogger().Error(err)
}
Done does nothing yet .
128
func ( ev * ChangeWatchResp ) GetChangeType ( ) datasync . Op { if ev . message . OperationType == PutDel_DEL { return datasync . Delete } return datasync . Put }
GetChangeType - see the comment in implemented interface datasync . ChangeEvent .
129
// GetValue JSON-unmarshals the message content into the given proto message.
func (ev *ChangeWatchResp) GetValue(val proto.Message) error {
	content := ev.message.Content
	return json.Unmarshal(content, val)
}
GetValue - see the comments in the interface datasync . ChangeEvent .
130
func NewAdapter ( grpcServer * grpc . Server ) * Adapter { adapter := & Adapter { base : syncbase . NewRegistry ( ) , server : grpcServer , } msg . RegisterDataMsgServiceServer ( grpcServer , & DataMsgServiceServer { adapter } ) return adapter }
NewAdapter creates a new instance of Adapter .
131
// Close safely closes the redis client and the response/close channels.
func (sr *SimpleRedis) Close() error {
	return safeclose.Close(sr.client, sr.respChan, sr.closeChan)
}
Close closes the redis client and the watcher channels .
132
// UseKV returns an Option that injects the given KV plugin dependency.
func UseKV(kv keyval.KvProtoPlugin) Option {
	return func(p *Plugin) {
		p.KvPlugin = kv
	}
}
UseKV returns Option that sets KvPlugin dependency .
133
// GetValue unmarshals the post-change value into the given proto message.
func (wr *protoWatchResp) GetValue(msg proto.Message) error {
	raw := wr.BytesWatchResp.GetValue()
	return wr.serializer.Unmarshal(raw, msg)
}
GetValue returns the value after the change .
134
func ( wr * protoWatchResp ) GetPrevValue ( msg proto . Message ) ( prevValueExist bool , err error ) { prevVal := wr . BytesWatchResp . GetPrevValue ( ) if prevVal == nil { return false , nil } err = wr . serializer . Unmarshal ( prevVal , msg ) if err != nil { return true , err } return true , nil }
GetPrevValue returns the previous value after the change .
135
// ReadStatusFromFile parses status directly from the given open file,
// allowing a custom location to be read.
func (r *Reader) ReadStatusFromFile(file *os.File) *File {
	parsed := r.parse(file)
	return parsed
}
ReadStatusFromFile allows to eventually read status from custom location and parse it directly
136
func ( r * Reader ) toInt ( input string ) int { result , err := strconv . Atoi ( prune ( input ) ) if err != nil { return - 1 } return result }
toInt converts the provided string to an int , returning - 1 when the conversion fails ; the parse error is discarded rather than returned .
137
// NewProtoTxn constructs an empty proto transaction with the given commit
// callback.
func NewProtoTxn(commit func(context.Context, map[string]datasync.ChangeValue) error) *ProtoTxn {
	txn := &ProtoTxn{
		items:  make(map[string]*protoTxnItem),
		commit: commit,
	}
	return txn
}
NewProtoTxn is a constructor .
138
func ( txn * ProtoTxn ) Delete ( key string ) keyval . ProtoTxn { txn . access . Lock ( ) defer txn . access . Unlock ( ) txn . items [ key ] = & protoTxnItem { delete : true } return txn }
Delete adds delete operation into transaction .
139
func sendMessage ( producer * client . SyncProducer , msg utils . Message ) error { var ( msgKey [ ] byte msgValue [ ] byte ) if msg . Key != "" { msgKey = [ ] byte ( msg . Key ) } msgValue = [ ] byte ( msg . Text ) _ , err := producer . SendMsgByte ( msg . Topic , msgKey , msgValue ) if err != nil { logr...
sendMessage demonstrates SyncProducer . SendMsgByte API to publish a single message to a Kafka topic .
140
// NewSyncPublisher returns a bytesSyncPublisherKafka bound to this connection
// and topic for publishing sync kafka messages via the common messaging API.
func (conn *BytesConnectionStr) NewSyncPublisher(topic string) (BytesPublisher, error) {
	pub := &bytesSyncPublisherKafka{conn, topic}
	return pub, nil
}
NewSyncPublisher creates a new instance of bytesSyncPublisherKafka that allows to publish sync kafka messages using common messaging API
141
// NewAsyncPublisher returns a bytesAsyncPublisherKafka bound to this
// connection, topic and the success/error callbacks for async publishing.
func (conn *BytesConnectionStr) NewAsyncPublisher(topic string, successClb func(*client.ProducerMessage), errorClb func(err *client.ProducerError)) (BytesPublisher, error) {
	pub := &bytesAsyncPublisherKafka{conn, topic, successClb, errorClb}
	return pub, nil
}
NewAsyncPublisher creates a new instance of bytesAsyncPublisherKafka that allows to publish async kafka messages using common messaging API
142
// NewSyncPublisherToPartition returns a bytesManualSyncPublisherKafka bound
// to this connection, topic and partition for sync publishing.
func (conn *BytesManualConnectionStr) NewSyncPublisherToPartition(topic string, partition int32) (BytesPublisher, error) {
	pub := &bytesManualSyncPublisherKafka{conn, topic, partition}
	return pub, nil
}
NewSyncPublisherToPartition creates a new instance of bytesManualSyncPublisherKafka that allows to publish sync kafka messages to a custom partition using common messaging API
143
func ( conn * BytesManualConnectionStr ) NewAsyncPublisherToPartition ( topic string , partition int32 , successClb func ( * client . ProducerMessage ) , errorClb func ( err * client . ProducerError ) ) ( BytesPublisher , error ) { return & bytesManualAsyncPublisherKafka { conn , topic , partition , successClb , errorC...
NewAsyncPublisherToPartition creates a new instance of bytesManualAsyncPublisherKafka that allows to publish async kafka messages to a custom partition using common messaging API
144
func ( conn * BytesConnectionStr ) ConsumeTopic ( msgClb func ( message * client . ConsumerMessage ) , topics ... string ) error { conn . multiplexer . rwlock . Lock ( ) defer conn . multiplexer . rwlock . Unlock ( ) if conn . multiplexer . started { return fmt . Errorf ( "ConsumeTopic can be called only if the mul...
ConsumeTopic is called to start consuming a topic . The function can be called only before the multiplexer is started ; it returns an error otherwise . The provided channel should be buffered , otherwise messages might be lost .
145
func ( conn * BytesManualConnectionStr ) StartPostInitConsumer ( topic string , partition int32 , offset int64 ) ( * sarama . PartitionConsumer , error ) { multiplexer := conn . multiplexer multiplexer . WithFields ( logging . Fields { "topic" : topic } ) . Debugf ( "Post-init consuming started" ) if multiplexer . ...
StartPostInitConsumer allows to start a new partition consumer after mux is initialized
146
// StopConsuming cancels this connection's previously created subscription
// for the given topic.
func (conn *BytesConnectionStr) StopConsuming(topic string) error {
	return conn.multiplexer.stopConsuming(topic, conn.name)
}
StopConsuming cancels the previously created subscription for consuming the topic .
147
func ( conn * BytesConnectionStr ) SendSyncMessage ( topic string , key client . Encoder , value client . Encoder ) ( offset int64 , err error ) { msg , err := conn . multiplexer . hashSyncProducer . SendMsgToPartition ( topic , DefPartition , key , value ) if err != nil { return 0 , err } return msg . Offset , e...
SendSyncMessage sends a message using the sync API and default partitioner
148
func ( conn * BytesConnectionStr ) SendAsyncMessage ( topic string , key client . Encoder , value client . Encoder , meta interface { } , successClb func ( * client . ProducerMessage ) , errClb func ( * client . ProducerError ) ) { auxMeta := & asyncMeta { successClb : successClb , errorClb : errClb , usersMeta : meta ...
SendAsyncMessage sends a message using the async API and default partitioner
149
func ( conn * BytesManualConnectionStr ) SendSyncMessageToPartition ( topic string , partition int32 , key client . Encoder , value client . Encoder ) ( offset int64 , err error ) { msg , err := conn . multiplexer . manSyncProducer . SendMsgToPartition ( topic , partition , key , value ) if err != nil { return 0 , er...
SendSyncMessageToPartition sends a message to the given partition using the sync API and the manual partitioner
150
func ( conn * BytesManualConnectionStr ) SendAsyncMessageToPartition ( topic string , partition int32 , key client . Encoder , value client . Encoder , meta interface { } , successClb func ( * client . ProducerMessage ) , errClb func ( * client . ProducerError ) ) { auxMeta := & asyncMeta { successClb : successClb , er...
SendAsyncMessageToPartition sends a message to the given partition using the async API and the manual partitioner
151
// SendSyncByte publishes a byte-encoded key/value via the sync API.
func (conn *BytesConnectionStr) SendSyncByte(topic string, key []byte, value []byte) (offset int64, err error) {
	k := sarama.ByteEncoder(key)
	v := sarama.ByteEncoder(value)
	return conn.SendSyncMessage(topic, k, v)
}
SendSyncByte sends a message that uses byte encoder using the sync API
152
// SendSyncString publishes a string-encoded key/value via the sync API.
func (conn *BytesConnectionStr) SendSyncString(topic string, key string, value string) (offset int64, err error) {
	k := sarama.StringEncoder(key)
	v := sarama.StringEncoder(value)
	return conn.SendSyncMessage(topic, k, v)
}
SendSyncString sends a message that uses string encoder using the sync API
153
// SendSyncStringToPartition publishes a string-encoded key/value via the
// sync API to the given custom partition.
func (conn *BytesManualConnectionStr) SendSyncStringToPartition(topic string, partition int32, key string, value string) (offset int64, err error) {
	k := sarama.StringEncoder(key)
	v := sarama.StringEncoder(value)
	return conn.SendSyncMessageToPartition(topic, partition, k, v)
}
SendSyncStringToPartition sends a message that uses string encoder using the sync API to custom partition
154
func ( conn * BytesConnectionStr ) SendAsyncByte ( topic string , key [ ] byte , value [ ] byte , meta interface { } , successClb func ( * client . ProducerMessage ) , errClb func ( * client . ProducerError ) ) { conn . SendAsyncMessage ( topic , sarama . ByteEncoder ( key ) , sarama . ByteEncoder ( value ) , meta , su...
SendAsyncByte sends a message that uses byte encoder using the async API
155
func ( conn * BytesConnectionStr ) SendAsyncString ( topic string , key string , value string , meta interface { } , successClb func ( * client . ProducerMessage ) , errClb func ( * client . ProducerError ) ) { conn . SendAsyncMessage ( topic , sarama . StringEncoder ( key ) , sarama . StringEncoder ( value ) , meta , ...
SendAsyncString sends a message that uses string encoder using the async API
156
func ( conn * BytesManualConnectionStr ) SendAsyncStringToPartition ( topic string , partition int32 , key string , value string , meta interface { } , successClb func ( * client . ProducerMessage ) , errClb func ( * client . ProducerError ) ) { conn . SendAsyncMessageToPartition ( topic , partition , sarama . StringEn...
SendAsyncStringToPartition sends a message that uses string encoder using the async API to custom partition
157
func ( conn * BytesConnectionFields ) CommitOffsets ( ) error { if conn . multiplexer != nil && conn . multiplexer . Consumer != nil { return conn . multiplexer . Consumer . CommitOffsets ( ) } return fmt . Errorf ( "cannot commit offsets, consumer not available" ) }
CommitOffsets manually commits message offsets
158
func NewProtoWrapper ( db keyval . CoreBrokerWatcher , serializer ... keyval . Serializer ) * ProtoWrapper { if len ( serializer ) > 0 { return & ProtoWrapper { db , serializer [ 0 ] } } return & ProtoWrapper { db , & keyval . SerializerProto { } } }
NewProtoWrapper initializes proto decorator . The default serializer is used - SerializerProto .
159
// NewProtoWrapperWithSerializer initializes the proto decorator with the
// explicitly specified serializer.
func NewProtoWrapperWithSerializer(db keyval.CoreBrokerWatcher, serializer keyval.Serializer) *ProtoWrapper {
	return NewProtoWrapper(db, serializer)
}
NewProtoWrapperWithSerializer initializes proto decorator with the specified serializer .
160
// NewWatcher returns a proxy sharing the underlying connection that allows
// subscribing for change notifications under the given prefix.
func (db *ProtoWrapper) NewWatcher(prefix string) keyval.ProtoWatcher {
	inner := db.broker.NewWatcher(prefix)
	return &protoWatcher{inner, db.serializer}
}
NewWatcher creates a new instance of the proxy that shares the underlying connection and allows subscribing for watching of the changes .
161
func ( p * Plugin ) getServiceHealth ( ) float64 { agentStatus := p . StatusCheck . GetAgentStatus ( ) health := float64 ( agentStatus . State ) p . Log . Infof ( "ServiceHealth: %v" , health ) return health }
getServiceHealth returns agent health status
162
func ( p * Plugin ) getDependencyHealth ( pluginName string , pluginStatus * status . PluginStatus ) func ( ) float64 { p . Log . Infof ( "DependencyHealth for plugin %v: %v" , pluginName , float64 ( pluginStatus . State ) ) return func ( ) float64 { health := float64 ( pluginStatus . State ) p . Log . Infof ( "Dep...
getDependencyHealth returns plugin health status
163
func ( plugin * ExamplePlugin ) watchChanges ( x datasync . ProtoWatchResp ) { message := & ipsec . TunnelInterfaces { } err := x . GetValue ( message ) if err == nil { plugin . Log . Infof ( "Got watch message %v" , message ) } }
watchChanges is watching for changes in DB
164
func ( p * Plugin ) Init ( ) ( err error ) { var config Config found , err := p . Cfg . LoadValue ( & config ) if err != nil { return err } if ! found { p . Log . Info ( "cryptodata config not found, skip loading this plugin" ) p . disabled = true return nil } clientConfig := ClientConfig { } for _ , ...
Init initializes cryptodata plugin .
165
// NewProtoConsumerMessage wraps a raw consumer message together with the
// serializer used to decode its payload.
func NewProtoConsumerMessage(msg *ConsumerMessage, serializer keyval.Serializer) *ProtoConsumerMessage {
	wrapped := &ProtoConsumerMessage{msg, serializer}
	return wrapped
}
NewProtoConsumerMessage creates new instance of ProtoConsumerMessage
166
func ( cm * ProtoConsumerMessage ) GetValue ( msg proto . Message ) error { err := cm . serializer . Unmarshal ( cm . ConsumerMessage . GetValue ( ) , msg ) if err != nil { return err } return nil }
GetValue returns the value associated with the message .
167
func ( cm * ProtoConsumerMessage ) GetPrevValue ( msg proto . Message ) ( prevValueExist bool , err error ) { prevVal := cm . ConsumerMessage . GetPrevValue ( ) if prevVal == nil { return false , nil } err = cm . serializer . Unmarshal ( prevVal , msg ) if err != nil { return true , err } return true , nil ...
GetPrevValue returns the previous value associated with the latest message .
168
func ( pm * ProducerMessage ) GetValue ( ) [ ] byte { val , _ := pm . Value . Encode ( ) return val }
GetValue returns the content of the message .
169
func ( ppm * ProtoProducerMessage ) GetValue ( msg proto . Message ) error { err := ppm . Serializer . Unmarshal ( ppm . ProducerMessage . GetValue ( ) , msg ) if err != nil { return err } return nil }
GetValue unmarshalls the content of the msg into provided structure .
170
func NewClient ( clientConfig ClientConfig ) * Client { client := & Client { ClientConfig : clientConfig , } if clientConfig . Reader == nil { client . Reader = rand . Reader } if clientConfig . Hash == nil { client . Hash = sha256 . New ( ) } return client }
NewClient creates new client from provided config and reader
171
// EncryptData implements ClientAPI.EncryptData using RSA-OAEP with the
// client's hash and random reader.
func (client *Client) EncryptData(inData []byte, pub *rsa.PublicKey) (data []byte, err error) {
	data, err = rsa.EncryptOAEP(client.Hash, client.Reader, pub, inData, nil)
	return data, err
}
EncryptData implements ClientAPI . EncryptData
172
func ( client * Client ) DecryptData ( inData [ ] byte ) ( data [ ] byte , err error ) { for _ , key := range client . PrivateKeys { data , err := rsa . DecryptOAEP ( client . Hash , client . Reader , key , inData , nil ) if err == nil { return data , nil } } return nil , errors . New ( "failed to decrypt data ...
DecryptData implements ClientAPI . DecryptData
173
// WrapBytes implements ClientAPI.WrapBytes: it decorates the bytes plugin
// with the given decrypter and this client's DecryptData.
func (client *Client) WrapBytes(cbw keyval.KvBytesPlugin, decrypter ArbitraryDecrypter) keyval.KvBytesPlugin {
	return NewKvBytesPluginWrapper(cbw, decrypter, client.DecryptData)
}
WrapBytes implements ClientAPI . WrapBytes
174
// WrapProto implements ClientAPI.WrapProto: it decorates the proto plugin
// with the given decrypter and this client's DecryptData.
func (client *Client) WrapProto(kvp keyval.KvProtoPlugin, decrypter ArbitraryDecrypter) keyval.KvProtoPlugin {
	return NewKvProtoPluginWrapper(kvp, decrypter, client.DecryptData)
}
WrapProto implements ClientAPI . WrapProto
175
// NewBytesTxn constructs an empty bytes transaction with the given commit
// callback.
func NewBytesTxn(commit func(context.Context, map[string]datasync.ChangeValue) error) *BytesTxn {
	txn := &BytesTxn{
		items:  make(map[string]*bytesTxnItem),
		commit: commit,
	}
	return txn
}
NewBytesTxn is a constructor .
176
func ( txn * BytesTxn ) Delete ( key string ) keyval . BytesTxn { txn . access . Lock ( ) defer txn . access . Unlock ( ) txn . items [ key ] = & bytesTxnItem { delete : true } return txn }
Delete adds a delete operation into the transaction .
177
func ( p * BoltExample ) Init ( ) ( err error ) { db := p . DB . NewBroker ( keyval . Root ) txn := db . NewTxn ( ) txn . Put ( "/agent/config/interface/iface0" , nil ) txn . Put ( "/agent/config/interface/iface1" , nil ) txn . Commit ( context . Background ( ) ) const listPrefix = "/agent/config/interface/" ...
Init demonstrates using Bolt plugin .
178
func ( embd * Embedded ) Start ( t * testing . T ) { dir , err := ioutil . TempDir ( "" , "ETCD" ) if err != nil { t . Error ( err ) t . FailNow ( ) } cfg := embed . NewConfig ( ) cfg . Dir = dir lpurl , _ := url . Parse ( "http://localhost:0" ) lcurl , _ := url . Parse ( "http://localhost:0" ) cfg . LP...
Start starts embedded ETCD .
179
func ( embd * Embedded ) Stop ( ) { embd . ETCD . Close ( ) os . RemoveAll ( embd . tmpDir ) }
Stop stops the embedded ETCD & cleanups the tmp dir .
180
func ( embd * Embedded ) CleanDs ( ) { if embd . client != nil { resp , err := embd . client . Delete ( context . Background ( ) , "" , clientv3 . WithPrefix ( ) ) if err != nil { panic ( err ) } fmt . Printf ( "resp: %+v\n" , \n ) } }
CleanDs deletes all stored key - value pairs .
181
// NewAdapter constructs an Adapter from the HTTP handler registrar and the
// local transport registry.
func NewAdapter(registerHTTPHandler registerHTTPHandler, localtransp *syncbase.Registry) *Adapter {
	a := &Adapter{
		registerHTTPHandler: registerHTTPHandler,
		base:                localtransp,
	}
	return a
}
NewAdapter is a constructor .
182
func ( adapter * Adapter ) Watch ( resyncName string , changeChan chan datasync . ChangeEvent , resyncChan chan datasync . ResyncEvent , keyPrefixes ... string ) ( datasync . WatchRegistration , error ) { logrus . DefaultLogger ( ) . Debug ( "REST KeyValProtoWatcher WatchData " , resyncName , " " , keyPrefixes ) for ...
Watch registers HTTP handlers - basically bridges them with local dbadapter .
183
func watchChannels ( consumer * client . Consumer , cfg * client . Config ) { for { select { case notification , more := <- cfg . RecvNotificationChan : if more { handleNotifcation ( consumer , notification ) } case err , more := <- cfg . RecvErrorChan : if more { fmt . Printf ( "Message Recv Errored: %v\n" , \n ) ...
watchChannels watches channels configured for delivery of Kafka messages notifications and errors .
184
func ( plugin * ExamplePlugin ) Init ( ) error { err := plugin . Prometheus . RegisterGaugeFunc ( prom . DefaultRegistry , "ns" , "sub" , "gaugeOne" , "this metrics represents randomly generated numbers" , prometheus . Labels { "Property1" : "ABC" , "Property2" : "DEF" } , func ( ) float64 { return rand . Float64 ( ) ...
Init creates metric registries and adds gauges
185
// Writer returns a POption that installs custom out/err writer instances.
// Nil parameters are accepted, in which case standard output is used.
func Writer(outW, errW io.Writer) POption {
	return func(p *POptions) {
		p.outWriter = outW
		p.errWriter = errW
	}
}
Writer allows to use custom writer instance . Can be defined with nil parameters in such a case standard output will be used
186
func Template ( runOnStartup bool ) POption { return func ( p * POptions ) { p . template = true p . runOnStartup = runOnStartup } }
Template will be created for given process . Process template also requires a flag whether the process should be started automatically with plugin
187
func ( h * HCL ) Setup ( ) error { r , err := h . gen ( ) if err != nil { return err } buf := new ( bytes . Buffer ) buf . ReadFrom ( r ) s := buf . String ( ) obj , err := hcl . Parse ( s ) if err != nil { return err } h . values = make ( map [ string ] interface { } ) if err = hcl . DecodeObject (...
Setup initializes the HCL Checker
188
func ( h * HCL ) Int ( name string ) ( int , error ) { v , err := h . value ( name ) if err != nil { return 0 , err } f , ok := v . ( float64 ) if ! ok { i , ok := v . ( int ) if ! ok { return v . ( int ) , errors . New ( fmt . Sprintf ( "%T unable" , v ) ) } return i , nil } return int ( f ) , nil ...
Int returns an int if it exists within the HCL io . Reader
189
func ( h * HCL ) Bool ( name string ) ( bool , error ) { v , err := h . value ( name ) if err != nil { return false , err } return v . ( bool ) , nil }
Bool returns a bool if it exists within the HCL io . Reader .
190
func ( h * HCL ) String ( name string ) ( string , error ) { v , err := h . value ( name ) if err != nil { return "" , err } return v . ( string ) , nil }
String returns a string if it exists within the HCL io . Reader .
191
func ( c * Configure ) Int ( name string , def int , description string ) * int { i := new ( int ) c . IntVar ( i , name , def , description ) return i }
Int defines an int flag with a name default and description . The return value is a pointer which will be populated with the value of the flag .
192
func ( c * Configure ) String ( name string , def string , description string ) * string { s := new ( string ) c . StringVar ( s , name , def , description ) return s }
String defines a string flag with a name default and description . The return value is a pointer which will be populated with the value of the flag .
193
func ( c * Configure ) Bool ( name string , def bool , description string ) * bool { b := new ( bool ) c . BoolVar ( b , name , def , description ) return b }
Bool defines a bool flag with a name default and description . The return value is a pointer which will be populated with the value of the flag .
194
func ( c * Configure ) option ( value interface { } , name string , def interface { } , description string , typ valueType ) { opt := & option { name : name , def : def , description : description , typ : typ , value : value , } c . options [ name ] = opt }
option will bind a pointer to a value provided in the value parameter to set flag value .
195
// Use appends any number of Checkers onto the configuration stack.
func (c *Configure) Use(checkers ...Checker) {
	c.stack = append(c.stack, checkers...)
}
Use adds a variable amount of Checkers onto the stack .
196
func ( c * Configure ) Parse ( ) { c . setup ( ) for _ , opt := range c . options { changed := false for _ , checker := range c . stack { switch opt . typ { case stringType : s , err := checker . String ( opt . name ) if err != nil { continue } opt . set ( s ) case intType : i , err := checker . Int ( opt ....
Parse populates all of the defined arguments with their values provided by the stacks Checkers .
197
func New ( stack ... Checker ) * Configure { c := & Configure { options : make ( map [ string ] * option ) , stack : stack , } return c }
New returns a pointer to a new Configure instance with a stack provided through the variadic stack variable .
198
func ( e Environment ) Int ( name string ) ( int , error ) { v , err := e . value ( name ) if err != nil { return 0 , err } i , err := strconv . Atoi ( v ) if err != nil { return 0 , err } return i , nil }
Int returns an int if it exists in the set environment variables .
199
func ( e * Environment ) Bool ( name string ) ( bool , error ) { v , err := e . value ( name ) if err != nil { return false , err } return strconv . ParseBool ( v ) }
Bool returns a bool if it exists in the set environment variables .