idx
int64 0
41.8k
| question
stringlengths 65
3.39k
| target
stringlengths 10
1.09k
|
|---|---|---|
0
|
// StatusRange restricts this matcher to responses whose status code lies
// between min and max (inclusive). If min is greater than max the two
// values are swapped. It returns the matcher for chaining.
func (r *ResponseMatcher) StatusRange(min, max int) *ResponseMatcher {
	if min > max {
		min, max = max, min
	}
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.minStatus = min
	r.maxStatus = max
	return r
}
|
StatusRange sets a criteria based on the Status code of the response for the Response Handler . Its Handler will only be called if the response has a status code between the min and max . If min is greater than max the values are switched .
|
1
|
// Scheme restricts this matcher to responses whose URL scheme matches the
// given scheme exactly. It returns the matcher for chaining.
func (r *ResponseMatcher) Scheme(scheme string) *ResponseMatcher {
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.scheme = scheme
	return r
}
|
Scheme sets a criteria based on the scheme of the URL for the Response Handler . Its Handler will only be called if the scheme of the URL matches exactly the specified scheme .
|
2
|
// Host restricts this matcher to responses whose URL host matches the
// given host exactly. It returns the matcher for chaining.
func (r *ResponseMatcher) Host(host string) *ResponseMatcher {
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.host = host
	return r
}
|
Host sets a criteria based on the host of the URL for the Response Handler . Its Handler will only be called if the host of the URL matches exactly the specified host .
|
3
|
// Path restricts this matcher to responses whose URL path starts with p.
// Longer path matches take priority over shorter ones. It returns the
// matcher for chaining.
func (r *ResponseMatcher) Path(p string) *ResponseMatcher {
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.path = p
	return r
}
|
Path sets a criteria based on the path of the URL for the Response Handler . Its Handler will only be called if the path of the URL starts with this path . Longer matches have priority over shorter ones .
|
4
|
// Custom restricts this matcher with an arbitrary predicate: the Handler
// is used only when predicate returns true for the HTTP response.
func (r *ResponseMatcher) Custom(predicate func(*http.Response) bool) *ResponseMatcher {
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.predicate = predicate
	return r
}
|
Custom sets a criteria based on a function that receives the HTTP response and returns true if the matcher should be used to handle this response false otherwise .
|
5
|
// Handler sets the Handler invoked when this matcher matches a response,
// and registers the matcher with its parent Mux if it is not already
// registered. It returns the matcher for chaining.
func (r *ResponseMatcher) Handler(h Handler) *ResponseMatcher {
	r.mux.mu.Lock()
	defer r.mux.mu.Unlock()
	r.h = h
	if !r.mux.res[r] {
		r.mux.res[r] = true
	}
	return r
}
|
Handler sets the Handler to be called when this Response Handler is the match for a given response . It registers the Response Handler in its parent Mux .
|
6
|
// New returns a Fetcher initialized with the given Handler and the
// package defaults for crawl delay, HTTP client, user agent and worker
// idle TTL. The debug channel is buffered so sends never block.
func New(h Handler) *Fetcher {
	return &Fetcher{
		Handler:       h,
		CrawlDelay:    DefaultCrawlDelay,
		HttpClient:    http.DefaultClient,
		UserAgent:     DefaultUserAgent,
		WorkerIdleTTL: DefaultWorkerIdleTTL,
		dbg:           make(chan *DebugInfo, 1),
	}
}
|
New returns an initialized Fetcher .
|
7
|
// Close marks the Queue as closed and blocks until the Fetcher drains all
// pending commands. Calling Close again is a no-op. After Close, attempts
// to enqueue return ErrQueueClosed.
func (q *Queue) Close() error {
	select {
	case <-q.closed:
		// Already closed.
		return nil
	default:
		close(q.closed)
		// The nil sentinel tells processQueue to stop reading.
		q.ch <- nil
		q.wg.Wait()
		close(q.done)
		return nil
	}
}
|
Close closes the Queue so that no more Commands can be sent . It blocks until the Fetcher drains all pending commands . After the call the Fetcher is stopped . Attempts to enqueue new URLs after Close has been called will always result in a ErrQueueClosed error .
|
8
|
// Cancel closes the Queue and discards pending commands instead of
// processing them, allowing a near-immediate stop. Safe to call more
// than once.
func (q *Queue) Cancel() error {
	select {
	case <-q.cancelled:
		return nil
	default:
		close(q.cancelled)
		return q.Close()
	}
}
|
Cancel closes the Queue and drains the pending commands without processing them, allowing for a fast, near-immediate stop.
|
9
|
// Send enqueues c into the Fetcher. A nil Command or a Command whose URL
// lacks a host is rejected with ErrEmptyHost; a closed queue yields
// ErrQueueClosed.
func (q *Queue) Send(c Command) error {
	if c == nil {
		return ErrEmptyHost
	}
	if u := c.URL(); u == nil || u.Host == "" {
		return ErrEmptyHost
	}
	select {
	case <-q.closed:
		return ErrQueueClosed
	default:
		q.ch <- c
	}
	return nil
}
|
Send enqueues a Command into the Fetcher. If the Queue has been closed it returns ErrQueueClosed. The Command's URL must have a Host.
|
10
|
// SendString enqueues the raw URL strings with the given HTTP method,
// returning how many were successfully enqueued and the first error hit.
func (q *Queue) SendString(method string, rawurl ...string) (int, error) {
	return q.sendWithMethod(method, rawurl)
}
|
SendString enqueues a method and some URL strings into the Fetcher . It returns an error if the URL string cannot be parsed or if the Queue has been closed . The first return value is the number of URLs successfully enqueued .
|
11
|
// SendStringHead enqueues the raw URL strings to be fetched with a HEAD
// request, returning the number successfully enqueued and any error.
func (q *Queue) SendStringHead(rawurl ...string) (int, error) {
	return q.sendWithMethod("HEAD", rawurl)
}
|
SendStringHead enqueues the URL strings to be fetched with a HEAD method . It returns an error if the URL string cannot be parsed or if the Queue has been closed . The first return value is the number of URLs successfully enqueued .
|
12
|
// SendStringGet enqueues the raw URL strings to be fetched with a GET
// request, returning the number successfully enqueued and any error.
func (q *Queue) SendStringGet(rawurl ...string) (int, error) {
	return q.sendWithMethod("GET", rawurl)
}
|
SendStringGet enqueues the URL strings to be fetched with a GET method . It returns an error if the URL string cannot be parsed or if the Queue has been closed . The first return value is the number of URLs successfully enqueued .
|
13
|
// Start launches the Fetcher's queue-processing goroutine and returns the
// Queue used to send Commands to be fetched.
func (f *Fetcher) Start() *Queue {
	f.hosts = make(map[string]chan Command)
	f.q = &Queue{
		ch:        make(chan Command, 1),
		closed:    make(chan struct{}),
		cancelled: make(chan struct{}),
		done:      make(chan struct{}),
	}
	f.q.wg.Add(1)
	go f.processQueue()
	return f.q
}
|
Start starts the Fetcher and returns the Queue to use to send Commands to be fetched .
|
14
|
// Debug enables debugging mode and returns the channel on which
// DebugInfo values are delivered. Not intended for package users.
func (f *Fetcher) Debug() <-chan *DebugInfo {
	f.dbgmu.Lock()
	defer f.dbgmu.Unlock()
	f.debugging = true
	return f.dbg
}
|
Debug returns the channel to use to receive the debugging information . It is not intended to be used by package users .
|
15
|
// processQueue dispatches queued Commands to per-host worker channels,
// creating the worker goroutines (and seeding a robots.txt fetch unless
// politeness is disabled) the first time a host is seen. It runs in its
// own goroutine and closes all host channels when the queue drains.
func (f *Fetcher) processQueue() {
loop:
	for v := range f.q.ch {
		if v == nil {
			// nil is the close sentinel sent by Queue.Close.
			select {
			case <-f.q.closed:
				break loop
			default:
			}
		}
		// Once cancelled, drop commands without processing them.
		select {
		case <-f.q.cancelled:
			continue
		default:
		}
		u := v.URL()
		f.mu.Lock()
		in, ok := f.hosts[u.Host]
		if !ok {
			// First command for this host: spin up its worker pipeline.
			var rob *url.URL
			if !f.DisablePoliteness {
				rob = u.ResolveReference(robotsTxtParsedPath)
			}
			var out chan Command
			in, out = make(chan Command, 1), make(chan Command, 1)
			f.hosts[u.Host] = in
			f.mu.Unlock()
			f.q.wg.Add(1)
			go sliceIQ(in, out)
			go f.processChan(out, u.Host)
			if !f.DisablePoliteness {
				// robots.txt must be fetched before any other URL.
				in <- robotCommand{&Cmd{U: rob, M: "GET"}}
			}
		} else {
			f.mu.Unlock()
		}
		in <- v
		f.dbgmu.Lock()
		if f.debugging {
			f.mu.Lock()
			// Non-blocking send: debug info is best-effort.
			select {
			case f.dbg <- &DebugInfo{len(f.hosts)}:
			default:
			}
			f.mu.Unlock()
		}
		f.dbgmu.Unlock()
	}
	// Queue drained: shut down every host worker and reset state.
	f.mu.Lock()
	for _, ch := range f.hosts {
		close(ch)
	}
	f.hosts = make(map[string]chan Command)
	f.mu.Unlock()
	f.q.wg.Done()
}
|
processQueue runs the queue in its own goroutine .
|
16
|
// processChan is the per-host worker goroutine: it applies the robots.txt
// policy, enforces the crawl delay between requests, and tears itself
// down after WorkerIdleTTL without traffic.
func (f *Fetcher) processChan(ch <-chan Command, hostKey string) {
	var (
		agent *robotstxt.Group
		wait  <-chan time.Time
		ttl   <-chan time.Time
		delay = f.CrawlDelay
	)
loop:
	for {
		select {
		case <-f.q.cancelled:
			break loop
		case v, ok := <-ch:
			if !ok {
				break loop
			}
			if wait != nil {
				// Honor the crawl delay from the previous request.
				<-wait
			}
			// Re-check cancellation after (possibly) waiting.
			select {
			case <-f.q.cancelled:
				break loop
			default:
			}
			switch r, ok := v.(robotCommand); {
			case ok:
				// robots.txt response: adopt its crawl delay if set.
				agent = f.getRobotAgent(r)
				if agent != nil && agent.CrawlDelay > 0 {
					delay = agent.CrawlDelay
				}
				wait = time.After(delay)
			case agent == nil || agent.Test(v.URL().Path):
				res, err := f.doRequest(v)
				f.visit(v, res, err)
				// Delay only after a request that was actually made.
				if err == nil {
					wait = time.After(delay)
				} else {
					wait = nil
				}
			default:
				// Disallowed by robots.txt.
				f.visit(v, nil, ErrDisallowed)
				wait = nil
			}
			ttl = time.After(f.WorkerIdleTTL)
		case <-ttl:
			// Idle too long: unregister this host and stop.
			f.mu.Lock()
			inch, ok := f.hosts[hostKey]
			delete(f.hosts, hostKey)
			if f.AutoClose && len(f.hosts) == 0 {
				go f.q.Close()
			}
			f.mu.Unlock()
			if ok {
				close(inch)
			}
			break loop
		}
	}
	// Drain any remaining commands so sliceIQ can exit cleanly.
	for range ch {
	}
	f.q.wg.Done()
}
|
Goroutine for a host's worker, processing requests for all of that host's URLs.
|
17
|
func ( f * Fetcher ) getRobotAgent ( r robotCommand ) * robotstxt . Group { res , err := f . doRequest ( r )
if err != nil { fmt . Fprintf ( os . Stderr , "fetchbot: error fetching robots.txt: %s\n" , \n )
err
}
return nil
if res . Body != nil { defer res . Body . Close ( )
}
robData , err := robotstxt . FromResponse ( res )
if err != nil { fmt . Fprintf ( os . Stderr , "fetchbot: error parsing robots.txt: %s\n" , \n )
err
}
}
|
Get the robots . txt User - Agent - specific group .
|
18
|
// visit invokes the Handler for cmd: the command's own Handler when it
// implements the interface, otherwise the Fetcher's default Handler. The
// response body, if present, is closed on return.
func (f *Fetcher) visit(cmd Command, res *http.Response, err error) {
	if res != nil && res.Body != nil {
		defer res.Body.Close()
	}
	if h, ok := cmd.(Handler); ok {
		h.Handle(&Context{Cmd: cmd, Q: f.q}, res, err)
		return
	}
	f.Handler.Handle(&Context{Cmd: cmd, Q: f.q}, res, err)
}
|
Call the Handler for this Command . Closes the response s body .
|
19
|
// doRequest builds and executes the HTTP request for cmd. Optional
// interfaces on the command customize the request: HeaderProvider sets
// headers, BasicAuthProvider sets credentials, CookiesProvider adds
// cookies, and ReaderProvider / ValuesProvider (mutually exclusive, in
// that priority order) supply the body. A default User-Agent is applied
// when none was provided.
func (f *Fetcher) doRequest(cmd Command) (*http.Response, error) {
	req, err := http.NewRequest(cmd.Method(), cmd.URL().String(), nil)
	if err != nil {
		return nil, err
	}
	if hd, ok := cmd.(HeaderProvider); ok {
		for k, v := range hd.Header() {
			req.Header[k] = v
		}
	}
	if ba, ok := cmd.(BasicAuthProvider); ok {
		req.SetBasicAuth(ba.BasicAuth())
	}
	if ck, ok := cmd.(CookiesProvider); ok {
		for _, c := range ck.Cookies() {
			req.AddCookie(c)
		}
	}
	if rd, ok := cmd.(ReaderProvider); ok {
		// Raw reader body; wrap in a NopCloser if it cannot close itself.
		rdr := rd.Reader()
		rc, isCloser := rdr.(io.ReadCloser)
		if !isCloser {
			rc = ioutil.NopCloser(rdr)
		}
		req.Body = rc
	} else if val, ok := cmd.(ValuesProvider); ok {
		// Form values, URL-encoded as the body.
		v := val.Values()
		req.Body = ioutil.NopCloser(strings.NewReader(v.Encode()))
		if req.Header.Get("Content-Type") == "" {
			req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		}
	}
	if req.Header.Get("User-Agent") == "" {
		req.Header.Set("User-Agent", f.UserAgent)
	}
	res, err := f.HttpClient.Do(req)
	if err != nil {
		return nil, err
	}
	return res, nil
}
|
Prepare and execute the request for this Command .
|
20
|
// sliceIQ provides an effectively infinite buffer between in and next:
// values received on in accumulate in a pending slice until the consumer
// accepts them on next. When in closes, the remaining buffer is flushed
// and next is closed. It should run in its own goroutine.
func sliceIQ(in <-chan Command, next chan<- Command) {
	defer close(next)
	pending := []Command{}
recv:
	for {
		if len(pending) == 0 {
			// Nothing buffered: block until a value (or close) arrives.
			v, ok := <-in
			if !ok {
				break
			}
			pending = append(pending, v)
		}
		select {
		case v, ok := <-in:
			if !ok {
				break recv
			}
			pending = append(pending, v)
		case next <- pending[0]:
			// Clear the sent slot so the value can be garbage-collected.
			pending[0] = nil
			pending = pending[1:]
		}
	}
	// in closed: flush whatever is still buffered.
	for _, v := range pending {
		next <- v
	}
}
|
sliceIQ creates an infinite buffered channel taking input on in and sending output to next . SliceIQ should be run in its own goroutine .
|
21
|
// NewPQueue creates a priority queue with the requested ordering: MAXPQ
// yields a max-heap, anything else a min-heap. Index 0 of the backing
// slice is left unused, as is conventional for 1-based binary heaps.
func NewPQueue(pqType PQType) *PQueue {
	var cmp func(int, int) bool
	if pqType == MAXPQ {
		cmp = max
	} else {
		cmp = min
	}
	items := make([]*item, 1)
	items[0] = nil
	return &PQueue{
		items:      items,
		elemsCount: 0,
		comparator: cmp,
	}
}
|
NewPQueue creates a new priority queue with the provided pqtype ordering type
|
22
|
// Push inserts value with the given priority and restores the heap
// invariant, all under the queue's lock.
func (pq *PQueue) Push(value interface{}, priority int) {
	// Renamed from `item` to avoid shadowing the element type.
	newIt := newItem(value, priority)
	pq.Lock()
	pq.items = append(pq.items, newIt)
	pq.elemsCount++
	pq.swim(pq.size())
	pq.Unlock()
}
|
Push the value item into the priority queue with provided priority .
|
23
|
// Size returns the number of elements currently in the priority queue.
func (pq *PQueue) Size() int {
	pq.RLock()
	defer pq.RUnlock()
	return pq.size()
}
|
Size returns the elements present in the priority queue count
|
24
|
// Empty reports whether the priority queue holds no elements.
func (pq *PQueue) Empty() bool {
	pq.RLock()
	defer pq.RUnlock()
	return pq.size() == 0
}
|
Check queue is empty
|
25
|
// NewCappedDeque creates a Deque limited to the specified capacity.
func NewCappedDeque(capacity int) *Deque {
	return &Deque{
		container: list.New(),
		capacity:  capacity,
	}
}
|
NewCappedDeque creates a Deque with the specified capacity limit .
|
26
|
// Size returns the number of elements currently in the deque.
func (s *Deque) Size() int {
	s.RLock()
	defer s.RUnlock()
	return s.container.Len()
}
|
Size returns the actual deque size
|
27
|
// Capacity returns the deque's capacity, or -1 when it is unlimited.
func (s *Deque) Capacity() int {
	s.RLock()
	defer s.RUnlock()
	return s.capacity
}
|
Capacity returns the capacity of the deque or - 1 if unlimited
|
28
|
// Empty reports whether the deque holds no elements.
func (s *Deque) Empty() bool {
	s.RLock()
	defer s.RUnlock()
	return s.container.Len() == 0
}
|
Empty checks if the deque is empty
|
29
|
// Full reports whether the deque has reached its capacity. A deque with
// negative capacity is unbounded and therefore never full.
func (s *Deque) Full() bool {
	s.RLock()
	defer s.RUnlock()
	return s.capacity >= 0 && s.container.Len() >= s.capacity
}
|
Full checks if the deque is full
|
30
|
// AddSchedulerTaskFromTask converts a core.Task into a scheduler Task,
// copying over its schedule and workflow map.
func AddSchedulerTaskFromTask(t core.Task) Task {
	st := SchedulerTaskFromTask(t)
	(&st).assertSchedule(t.Schedule())
	st.Workflow = t.WMap()
	return st
}
|
functions to convert a core . Task to a Task
|
31
|
// SecurityTLSEnabled builds a GRPCSecurity value that enables TLS for
// gRPC communication using the given certificate and key paths.
func SecurityTLSEnabled(certPath, keyPath string, secureSide SecureSide) GRPCSecurity {
	return GRPCSecurity{
		TLSEnabled:  true,
		SecureSide:  secureSide,
		TLSCertPath: certPath,
		TLSKeyPath:  keyPath,
	}
}
|
SecurityTLSEnabled generates security object for securing gRPC communication
|
32
|
// SecurityTLSExtended builds a TLS-enabled GRPCSecurity value that also
// carries CA certificate paths for verifying the peer's identity.
func SecurityTLSExtended(certPath, keyPath string, secureSide SecureSide, caCertPaths []string) GRPCSecurity {
	return GRPCSecurity{
		TLSEnabled:  true,
		SecureSide:  secureSide,
		TLSCertPath: certPath,
		TLSKeyPath:  keyPath,
		CACertPaths: caCertPaths,
	}
}
|
SecurityTLSExtended generates security object for securing gRPC communication . This function accepts also a list of CA cert paths for verifying TLS participant s identity .
|
33
|
// NewCollectorGrpcClient returns a collector gRPC client connected to the
// given address, or an error if the connection cannot be established.
func NewCollectorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginCollectorClient, error) {
	ctx := context.Background()
	p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.CollectorPluginType)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return the literal for clarity and for
	// consistency with NewStreamCollectorGrpcClient.
	return p.(PluginCollectorClient), nil
}
|
NewCollectorGrpcClient returns a collector gRPC Client .
|
34
|
// NewStreamCollectorGrpcClient returns a streaming-collector gRPC client
// connected to the given address, or an error on connection failure.
func NewStreamCollectorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginStreamCollectorClient, error) {
	ctx := context.Background()
	p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.StreamCollectorPluginType)
	if err != nil {
		return nil, err
	}
	return p.(PluginStreamCollectorClient), nil
}
|
NewStreamCollectorGrpcClient returns a stream collector gRPC client
|
35
|
// NewProcessorGrpcClient returns a processor gRPC client connected to the
// given address, or an error if the connection cannot be established.
func NewProcessorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginProcessorClient, error) {
	ctx := context.Background()
	p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.ProcessorPluginType)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return the literal for clarity and for
	// consistency with the other client constructors.
	return p.(PluginProcessorClient), nil
}
|
NewProcessorGrpcClient returns a processor gRPC Client .
|
36
|
// NewPublisherGrpcClient returns a publisher gRPC client connected to the
// given address, or an error if the connection cannot be established.
func NewPublisherGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginPublisherClient, error) {
	ctx := context.Background()
	p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.PublisherPluginType)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return the literal for clarity and for
	// consistency with the other client constructors.
	return p.(PluginPublisherClient), nil
}
|
NewPublisherGrpcClient returns a publisher gRPC Client .
|
37
|
// newPluginGrpcClient parses the address into host and port, builds
// transport credentials from the security settings, and returns a
// configured gRPC client for the given plugin type.
func newPluginGrpcClient(ctx context.Context, address string, timeout time.Duration, security GRPCSecurity, typ plugin.PluginType) (interface{}, error) {
	address, port, err := parseAddress(address)
	if err != nil {
		return nil, err
	}
	var creds credentials.TransportCredentials
	if creds, err = buildCredentials(security); err != nil {
		return nil, err
	}
	p, err := newGrpcClient(ctx, address, int(port), timeout, typ, creds)
	if err != nil {
		return nil, err
	}
	return p, nil
}
|
newPluginGrpcClient returns a configured gRPC Client .
|
38
|
// GetConfigPolicy returns the mock's config policy for testing: an
// optional "name" (default "bob") and a required "password" under the
// intel/mock/foo namespace.
func (f *Mock) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
	c := cpolicy.New()
	nameRule, _ := cpolicy.NewStringRule("name", false, "bob")
	passRule, _ := cpolicy.NewStringRule("password", true)
	node := cpolicy.NewPolicyNode()
	node.Add(nameRule)
	node.Add(passRule)
	c.Add([]string{"intel", "mock", "foo"}, node)
	return c, nil
}
|
GetConfigPolicy returns a ConfigPolicyTree for testing
|
39
|
// beforeAction runs before every CLI command: it builds the snap client
// from the global flags, attaches any provided credentials, and verifies
// that tribe commands are usable against the target snapteld.
func beforeAction(ctx *cli.Context) error {
	username, password := checkForAuth(ctx)
	// pClient and err are package-level: assign, don't redeclare.
	pClient, err = client.New(ctx.String("url"), ctx.String("api-version"), ctx.Bool("insecure"), client.Timeout(ctx.Duration("timeout")))
	if err != nil {
		return fmt.Errorf("%v", err)
	}
	pClient.Password = password
	pClient.Username = username
	if err = checkTribeCommand(ctx); err != nil {
		return fmt.Errorf("%v", err)
	}
	return nil
}
|
Run before every command
|
40
|
// checkTribeCommand returns an error when a tribe command is issued
// against a snapteld instance that does not have tribe mode enabled.
// Detection scans os.Args for any known tribe command name.
func checkTribeCommand(ctx *cli.Context) error {
	isTribe := false
	for _, arg := range os.Args {
		for _, command := range tribeCommands {
			if strings.Contains(arg, command.Name) {
				isTribe = true
				break
			}
		}
		if isTribe {
			break
		}
	}
	if !isTribe {
		// Not a tribe command: nothing to verify.
		return nil
	}
	resp := pClient.ListAgreements()
	if resp.Err != nil {
		if resp.Err.Error() == "Invalid credentials" {
			return resp.Err
		}
		return fmt.Errorf("Tribe mode must be enabled in snapteld to use tribe command")
	}
	return nil
}
|
Checks if a tribe command was issued when tribe mode was not enabled on the specified snapteld instance .
|
41
|
// Select picks the available plugin with the oldest last-hit time
// (least-recently-used). It returns ErrCouldNotSelect when no plugin can
// be chosen (empty pool).
func (l *lru) Select(aps []AvailablePlugin, _ string) (AvailablePlugin, error) {
	oldest := time.Now()
	index := -1
	for i, ap := range aps {
		// The first candidate always wins; after that only older hits do.
		if ap.LastHit().Before(oldest) || index == -1 {
			index = i
			oldest = ap.LastHit()
		}
	}
	if index > -1 {
		l.logger.WithFields(log.Fields{
			"block":     "select",
			"strategy":  l.String(),
			"pool size": len(aps),
			"index":     aps[index].String(),
			"hitcount":  aps[index].HitCount(),
		}).Debug("plugin selected")
		return aps[index], nil
	}
	l.logger.WithFields(log.Fields{
		"block":    "select",
		"strategy": l.String(),
		"error":    ErrCouldNotSelect,
	}).Error("error selecting")
	return nil, ErrCouldNotSelect
}
|
Select selects an available plugin using the least - recently - used strategy .
|
42
|
// Remove selects a plugin via Select. The LRU strategy keeps no per-task
// state, so nothing needs cleaning up beyond returning the chosen plugin.
func (l *lru) Remove(aps []AvailablePlugin, taskID string) (AvailablePlugin, error) {
	ap, err := l.Select(aps, taskID)
	if err != nil {
		return nil, err
	}
	return ap, nil
}
|
Remove selects a plugin Since there is no state to cleanup we only need to return the selected plugin
|
43
|
// ListMembers retrieves the tribe member list via an HTTP GET call,
// returning the list on success or the API error otherwise.
func (c *Client) ListMembers() *ListMembersResult {
	resp, err := c.do("GET", "/tribe/members", ContentTypeJSON, nil)
	if err != nil {
		return &ListMembersResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeMemberListType:
		return &ListMembersResult{resp.Body.(*rbody.TribeMemberList), nil}
	case rbody.ErrorType:
		return &ListMembersResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &ListMembersResult{Err: ErrAPIResponseMetaType}
	}
}
|
ListMembers retrieves a list of tribe members through an HTTP GET call . A list of tribe member returns if it succeeds . Otherwise an error is returned .
|
44
|
// GetMember retrieves the tribe member with the given name via an HTTP
// GET call, returning the member on success or the API error otherwise.
func (c *Client) GetMember(name string) *GetMemberResult {
	resp, err := c.do("GET", fmt.Sprintf("/tribe/member/%s", name), ContentTypeJSON, nil)
	if err != nil {
		return &GetMemberResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeMemberShowType:
		return &GetMemberResult{resp.Body.(*rbody.TribeMemberShow), nil}
	case rbody.ErrorType:
		return &GetMemberResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &GetMemberResult{Err: ErrAPIResponseMetaType}
	}
}
|
GetMember retrieves the tribe member given a member name . The request is an HTTP GET call . The corresponding tribe member object returns if it succeeds . Otherwise an error is returned .
|
45
|
// ListAgreements retrieves the tribe agreement list via an HTTP GET call,
// returning the agreements on success or the API error otherwise.
func (c *Client) ListAgreements() *ListAgreementResult {
	resp, err := c.do("GET", "/tribe/agreements", ContentTypeJSON, nil)
	if err != nil {
		return &ListAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeListAgreementType:
		return &ListAgreementResult{resp.Body.(*rbody.TribeListAgreement), nil}
	case rbody.ErrorType:
		return &ListAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &ListAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
ListAgreements retrieves a list of a tribe agreements through an HTTP GET call . A list of tribe agreement map returns if it succeeds . Otherwise an error is returned .
|
46
|
// AddAgreement creates a tribe agreement with the given name via an HTTP
// POST call. The new agreement has no effect until members join it.
func (c *Client) AddAgreement(name string) *AddAgreementResult {
	payload, err := json.Marshal(struct {
		Name string `json:"name"`
	}{Name: name})
	if err != nil {
		return &AddAgreementResult{Err: err}
	}
	resp, err := c.do("POST", "/tribe/agreements", ContentTypeJSON, payload)
	if err != nil {
		return &AddAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeAddAgreementType:
		return &AddAgreementResult{resp.Body.(*rbody.TribeAddAgreement), nil}
	case rbody.ErrorType:
		return &AddAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &AddAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
AddAgreement adds a tribe agreement giving an agreement name into tribe agreement list through an HTTP POST call . A map of tribe agreements with the newly added named agreement returns if it succeeds . Otherwise an error is returned . Note that the newly added agreement has no effect unless members join the agreement .
|
47
|
// DeleteAgreement removes the named tribe agreement via an HTTP DELETE
// call. Deleting an agreement removes it for all its members.
func (c *Client) DeleteAgreement(name string) *DeleteAgreementResult {
	resp, err := c.do("DELETE", fmt.Sprintf("/tribe/agreements/%s", name), ContentTypeJSON, nil)
	if err != nil {
		return &DeleteAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeDeleteAgreementType:
		return &DeleteAgreementResult{resp.Body.(*rbody.TribeDeleteAgreement), nil}
	case rbody.ErrorType:
		return &DeleteAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &DeleteAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
DeleteAgreement removes a tribe agreement giving an agreement name from the tribe agreement list through an HTTP DELETE call . A map of tribe agreements with the specified agreement removed returns if it succeeds . Otherwise an error is returned . Note deleting an agreement removes the agreement from the tribe entirely for all the members of the agreement .
|
48
|
// GetAgreement retrieves the named tribe agreement via an HTTP GET call,
// returning the agreement on success or the API error otherwise.
func (c *Client) GetAgreement(name string) *GetAgreementResult {
	resp, err := c.do("GET", fmt.Sprintf("/tribe/agreements/%s", name), ContentTypeJSON, nil)
	if err != nil {
		return &GetAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeGetAgreementType:
		return &GetAgreementResult{resp.Body.(*rbody.TribeGetAgreement), nil}
	case rbody.ErrorType:
		return &GetAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &GetAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
GetAgreement retrieves a tribe agreement given an agreement name through an HTTP GET call . A tribe agreement returns if it succeeded . Otherwise an error is returned .
|
49
|
// JoinAgreement adds the named member to the named agreement via an HTTP
// PUT call, returning the updated agreement on success.
func (c *Client) JoinAgreement(agreementName, memberName string) *JoinAgreementResult {
	payload, err := json.Marshal(struct {
		MemberName string `json:"member_name"`
	}{MemberName: memberName})
	if err != nil {
		return &JoinAgreementResult{Err: err}
	}
	resp, err := c.do("PUT", fmt.Sprintf("/tribe/agreements/%s/join", agreementName), ContentTypeJSON, payload)
	if err != nil {
		return &JoinAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeJoinAgreementType:
		return &JoinAgreementResult{resp.Body.(*rbody.TribeJoinAgreement), nil}
	case rbody.ErrorType:
		return &JoinAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &JoinAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
JoinAgreement adds a tribe member into the agreement given the agreement name and the member name . It is an HTTP PUT request . The agreement with the newly added member returns if it succeeds . Otherwise an error is returned . Note that dual directional agreement replication happens automatically through the gossip protocol between a newly joined member and existing members within the same agreement .
|
50
|
// LeaveAgreement removes the named member from the named agreement via an
// HTTP DELETE call, returning the updated agreement on success.
func (c *Client) LeaveAgreement(agreementName, memberName string) *LeaveAgreementResult {
	payload, err := json.Marshal(struct {
		MemberName string `json:"member_name"`
	}{MemberName: memberName})
	if err != nil {
		return &LeaveAgreementResult{Err: err}
	}
	resp, err := c.do("DELETE", fmt.Sprintf("/tribe/agreements/%s/leave", agreementName), ContentTypeJSON, payload)
	if err != nil {
		return &LeaveAgreementResult{Err: err}
	}
	switch resp.Meta.Type {
	case rbody.TribeLeaveAgreementType:
		return &LeaveAgreementResult{resp.Body.(*rbody.TribeLeaveAgreement), nil}
	case rbody.ErrorType:
		return &LeaveAgreementResult{Err: resp.Body.(*rbody.Error)}
	default:
		return &LeaveAgreementResult{Err: ErrAPIResponseMetaType}
	}
}
|
LeaveAgreement removes a member from the agreement given the agreement and member names through an HTTP DELETE call . The agreement with the removed member returns if it succeeds . Otherwise an error is returned . For example it is useful to leave an agreement for a member node repair .
|
51
|
// ToNamespace converts a core.Namespace into its protobuf representation.
func ToNamespace(n core.Namespace) []*NamespaceElement {
	out := make([]*NamespaceElement, 0, len(n))
	for _, e := range n {
		out = append(out, &NamespaceElement{
			Value:       e.Value,
			Description: e.Description,
			Name:        e.Name,
		})
	}
	return out
}
|
Convert core . Namespace to common . Namespace protobuf message
|
52
|
// ToCoreMetric converts a Metric protobuf message into a core.Metric. A
// zero-value lastAdvertisedTime (Unix seconds -62135596800, i.e. the Go
// time.Time zero) is replaced by the current time, and the typed data
// payload is unpacked into a plain interface value.
func ToCoreMetric(mt *Metric) core.Metric {
	var lastAdvertisedTime time.Time
	if mt.LastAdvertisedTime.Sec == int64(-62135596800) {
		lastAdvertisedTime = time.Unix(time.Now().Unix(), int64(time.Now().Nanosecond()))
	} else {
		lastAdvertisedTime = time.Unix(mt.LastAdvertisedTime.Sec, mt.LastAdvertisedTime.Nsec)
	}
	ret := &metric{
		namespace:          ToCoreNamespace(mt.Namespace),
		version:            int(mt.Version),
		tags:               mt.Tags,
		timeStamp:          time.Unix(mt.Timestamp.Sec, mt.Timestamp.Nsec),
		lastAdvertisedTime: lastAdvertisedTime,
		config:             ConfigMapToConfig(mt.Config),
		description:        mt.Description,
		unit:               mt.Unit,
	}
	switch mt.Data.(type) {
	case *Metric_BytesData:
		ret.data = mt.GetBytesData()
	case *Metric_StringData:
		ret.data = mt.GetStringData()
	case *Metric_Float32Data:
		ret.data = mt.GetFloat32Data()
	case *Metric_Float64Data:
		ret.data = mt.GetFloat64Data()
	case *Metric_Int32Data:
		ret.data = mt.GetInt32Data()
	case *Metric_Int64Data:
		ret.data = mt.GetInt64Data()
	case *Metric_Uint32Data:
		ret.data = mt.GetUint32Data()
	case *Metric_Uint64Data:
		ret.data = mt.GetUint64Data()
	case *Metric_BoolData:
		ret.data = mt.GetBoolData()
	}
	return ret
}
|
Convert common . Metric to core . Metric
|
53
|
// ToCoreNamespace converts a protobuf namespace into a core.Namespace.
func ToCoreNamespace(n []*NamespaceElement) core.Namespace {
	var namespace core.Namespace
	for _, elem := range n {
		namespace = append(namespace, core.NamespaceElement{
			Value:       elem.Value,
			Description: elem.Description,
			Name:        elem.Name,
		})
	}
	return namespace
}
|
Convert common . Namespace protobuf message to core . Namespace
|
54
|
// ToSubPluginMsg converts a core.SubscribedPlugin into its protobuf form.
func ToSubPluginMsg(pl core.SubscribedPlugin) *SubscribedPlugin {
	return &SubscribedPlugin{
		TypeName: pl.TypeName(),
		Name:     pl.Name(),
		Version:  int64(pl.Version()),
		Config:   ConfigToConfigMap(pl.Config()),
	}
}
|
Convert core . SubscribedPlugin to SubscribedPlugin protobuf message
|
55
|
// ToSubPlugin converts a SubscribedPlugin protobuf message into a
// core.SubscribedPlugin.
func ToSubPlugin(msg *SubscribedPlugin) core.SubscribedPlugin {
	return SubPlugin{
		typeName: msg.TypeName,
		name:     msg.Name,
		version:  int(msg.Version),
		config:   ConfigMapToConfig(msg.Config),
	}
}
|
Convert from a SubscribedPlugin protobuf message to core . SubscribedPlugin
|
56
|
// ToCorePluginMsg converts a core.Plugin into its protobuf form.
func ToCorePluginMsg(pl core.Plugin) *Plugin {
	return &Plugin{
		TypeName: pl.TypeName(),
		Name:     pl.Name(),
		Version:  int64(pl.Version()),
	}
}
|
Convert from core . Plugin to Plugin protobuf message
|
57
|
// ToCorePluginsMsg converts a slice of core.Plugin into protobuf form.
func ToCorePluginsMsg(pls []core.Plugin) []*Plugin {
	out := make([]*Plugin, len(pls))
	for i, pl := range pls {
		out[i] = ToCorePluginMsg(pl)
	}
	return out
}
|
Convert from Plugin protobuf message to core . Plugin
|
58
|
// MsgToCorePlugin converts a Plugin protobuf message into a core.Plugin.
func MsgToCorePlugin(msg *Plugin) core.Plugin {
	pl := &SubPlugin{
		typeName: msg.TypeName,
		name:     msg.Name,
		version:  int(msg.Version),
	}
	return core.Plugin(pl)
}
|
Converts Plugin protobuf message to core . Plugin
|
59
|
// MsgToCorePlugins converts a slice of Plugin protobuf messages into
// core.Plugin values.
func MsgToCorePlugins(msg []*Plugin) []core.Plugin {
	out := make([]core.Plugin, len(msg))
	for i, m := range msg {
		out[i] = MsgToCorePlugin(m)
	}
	return out
}
|
Converts slice of plugin protobuf messages to core . Plugins
|
60
|
// ToSubPlugins converts a slice of SubscribedPlugin protobuf messages
// into core.SubscribedPlugin values.
func ToSubPlugins(msg []*SubscribedPlugin) []core.SubscribedPlugin {
	out := make([]core.SubscribedPlugin, len(msg))
	for i, m := range msg {
		out[i] = ToSubPlugin(m)
	}
	return out
}
|
Converts slice of SubscribedPlugin Messages to core . SubscribedPlugins
|
61
|
// ToSubPluginsMsg converts a slice of core.SubscribedPlugin values into
// protobuf messages.
func ToSubPluginsMsg(sp []core.SubscribedPlugin) []*SubscribedPlugin {
	out := make([]*SubscribedPlugin, len(sp))
	for i, p := range sp {
		out[i] = ToSubPluginMsg(p)
	}
	return out
}
|
Converts core . SubscribedPlugins to protobuf messages
|
62
|
// ConfigMapToConfig converts a ConfigMap protobuf message into a
// cdata.ConfigDataNode; a nil map yields nil.
func ConfigMapToConfig(cfg *ConfigMap) *cdata.ConfigDataNode {
	if cfg == nil {
		return nil
	}
	return cdata.FromTable(ParseConfig(cfg))
}
|
Converts configMaps to ConfigDataNode
|
63
|
// ConfigToConfigMap converts a cdata.ConfigDataNode into a ConfigMap
// protobuf message; a nil node yields nil.
func ConfigToConfigMap(cd *cdata.ConfigDataNode) *ConfigMap {
	if cd == nil {
		return nil
	}
	return ToConfigMap(cd.Table())
}
|
Converts ConfigDataNode to ConfigMap protobuf message
|
64
|
// ConvertSnapErrors converts SnapError protobuf messages into
// serror.SnapError values, preserving message text and fields.
func ConvertSnapErrors(s []*SnapError) []serror.SnapError {
	out := make([]serror.SnapError, len(s))
	for i, e := range s {
		out[i] = serror.New(errors.New(e.ErrorString), GetFields(e))
	}
	return out
}
|
Converts SnapError protobuf messages to serror . Snaperrors
|
65
|
// ToSnapError converts a single SnapError protobuf message into a
// serror.SnapError; a nil message yields nil.
func ToSnapError(e *SnapError) serror.SnapError {
	if e == nil {
		return nil
	}
	return serror.New(errors.New(e.ErrorString), GetFields(e))
}
|
Converts a single SnapError protobuf message to SnapError
|
66
|
// GetFields copies the error fields of a SnapError protobuf message into
// a plain map.
func GetFields(s *SnapError) map[string]interface{} {
	fields := make(map[string]interface{}, len(s.ErrorFields))
	for k, v := range s.ErrorFields {
		fields[k] = v
	}
	return fields
}
|
Returns the fields from a SnapError protobuf message
|
67
|
// Get returns the managesMetrics instance mapped to key. An empty key
// returns the local instance supplied at initialization; an unknown key
// yields an error.
func (m *managers) Get(key string) (managesMetrics, error) {
	if key == "" {
		return m.local, nil
	}
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if val, ok := m.remoteManagers[key]; ok {
		return val, nil
	}
	// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028);
	// the else branch after return is flattened.
	return nil, fmt.Errorf("Client not found for: %v", key)
}
|
Returns the managesMetric instance that maps to given string . If an empty string is given will instead return the local instance passed in on initialization .
|
68
|
// wmapToWorkflow converts a wmap.WorkflowMap into a schedulerWorkflow,
// propagating any error from converting its collection node.
func wmapToWorkflow(wfMap *wmap.WorkflowMap) (*schedulerWorkflow, error) {
	wf := &schedulerWorkflow{}
	if err := convertCollectionNode(wfMap.Collect, wf); err != nil {
		return nil, err
	}
	wf.workflowMap = wfMap
	return wf, nil
}
|
WmapToWorkflow attempts to convert a wmap . WorkflowMap to a schedulerWorkflow instance .
|
69
|
// Start runs one collection pass of the workflow for task t: it marks the
// workflow started, runs a collector job to completion, and either emits a
// MetricCollectionFailedEvent (recording the failure on the task) or a
// MetricCollectedEvent followed by submission of process/publish jobs.
func ( s * schedulerWorkflow ) Start ( t * task ) { workflowLogger . WithFields ( log . Fields { "_block" : "workflow-start" , "task-id" : t . id , "task-name" : t . name , } ) . Debug ( "Starting workflow" )
s . state = WorkflowStarted
j := newCollectorJob ( s . metrics , t . deadlineDuration , t . metricsManager , t . workflow . configTree , t . id , s . tags )
// Await blocks until the collector job has finished (or failed).
errors := t . manager . Work ( j ) . Promise ( ) . Await ( )
if len ( errors ) > 0 { t . RecordFailure ( errors )
event := new ( scheduler_event . MetricCollectionFailedEvent )
event . TaskID = t . id
event . Errors = errors
// Deferred so the event fires after this function returns.
defer s . eventEmitter . Emit ( event )
return
}
event := new ( scheduler_event . MetricCollectedEvent )
event . TaskID = t . id
event . Metrics = j . ( * collectorJob ) . metrics
// Deferred: the collected event is emitted only after workJobs completes.
defer s . eventEmitter . Emit ( event )
workJobs ( s . processNodes , s . publishNodes , t , j )
}
|
Start starts a workflow
|
70
|
// workJobs submits process and publish jobs for a task in one batch and
// waits for all of them to complete before returning. No-op when there is
// nothing to submit.
func workJobs(prs []*processNode, pus []*publishNode, t *task, pj job) {
	if len(prs) == 0 && len(pus) == 0 {
		return
	}
	var wg sync.WaitGroup
	fields := log.Fields{
		"_block":              "work-jobs",
		"task-id":             t.id,
		"task-name":           t.name,
		"count-process-nodes": len(prs),
		"count-publish-nodes": len(pus),
		"parent-node-type":    pj.TypeString(),
	}
	workflowLogger.WithFields(fields).Debug("Batch submission of process and publish nodes")
	for _, pr := range prs {
		wg.Add(1)
		go submitProcessJob(pj, t, &wg, pr)
	}
	for _, pu := range pus {
		wg.Add(1)
		go submitPublishJob(pj, t, &wg, pu)
	}
	wg.Wait()
	workflowLogger.WithFields(fields).Debug("Batch submission complete")
}
|
workJobs takes a slice of process and publish nodes and submits jobs for each for a task . It then iterates down any process nodes to submit their child node jobs for the task
|
71
|
// GetClientConnection returns an unsecured grpc.ClientConn to the given
// address and port by delegating to GetClientConnectionWithCreds with nil
// credentials.
func GetClientConnection(ctx context.Context, addr string, port int) (*grpc.ClientConn, error) {
	conn, err := GetClientConnectionWithCreds(ctx, addr, port, nil)
	return conn, err
}
|
GetClientConnection returns a grpc.ClientConn that is unsecured
|
72
|
// IsTLSEnabled returns true if the config values enable TLS in plugin
// communication, i.e. both a certificate path and a key path are set.
func (p *Config) IsTLSEnabled() bool {
	// Return the condition directly instead of if cond { return true }.
	return p.TLSCertPath != "" && p.TLSKeyPath != ""
}
|
IsTLSEnabled returns true if config values enable TLS in plugin communication
|
73
|
// WritePEMFile writes a block of bytes into a PEM-formatted file with the
// given header. Encode and flush errors are now propagated to the caller
// instead of being silently dropped.
func (u CertTestUtil) WritePEMFile(fn string, pemHeader string, b []byte) error {
	f, err := os.Create(fn)
	if err != nil {
		return err
	}
	defer f.Close()
	w := bufio.NewWriter(f)
	// Previously pem.Encode's and Flush's errors were ignored, which could
	// leave a truncated or empty PEM file while reporting success.
	if err := pem.Encode(w, &pem.Block{Type: pemHeader, Bytes: b}); err != nil {
		return err
	}
	return w.Flush()
}
|
WritePEMFile writes block of bytes into a PEM formatted file with given header .
|
74
|
// MakeCACertKeyPair generates an RSA private key and a self-signed CA
// certificate suitable for signing other certificates. It returns the
// certificate template, the DER-encoded certificate bytes, and the key.
// NOTE(review): the ouName parameter is not used in the subject — confirm
// whether it should be added to pkix.Name.
func ( u CertTestUtil ) MakeCACertKeyPair ( caName , ouName string , keyValidPeriod time . Duration ) ( caCertTpl * x509 . Certificate , caCertBytes [ ] byte , caPrivKey * rsa . PrivateKey , err error ) { caPrivKey , err = rsa . GenerateKey ( rand . Reader , keyBitsDefault )
if err != nil { return nil , nil , nil , err
}
caPubKey := caPrivKey . Public ( )
caPubBytes , err := x509 . MarshalPKIXPublicKey ( caPubKey )
if err != nil { return nil , nil , nil , err
}
// SHA-256 of the DER-encoded public key becomes the SubjectKeyId.
caPubSha256 := sha256 . Sum256 ( caPubBytes )
caCertTpl = & x509 . Certificate { SignatureAlgorithm : defaultSignatureAlgorithm , PublicKeyAlgorithm : defaultPublicKeyAlgorithm , Version : 3 , SerialNumber : big . NewInt ( 1 ) , Subject : pkix . Name { CommonName : caName , } , NotBefore : time . Now ( ) , NotAfter : time . Now ( ) . Add ( keyValidPeriod ) , KeyUsage : x509 . KeyUsageCertSign | x509 . KeyUsageCRLSign , BasicConstraintsValid : true , MaxPathLenZero : true , IsCA : true , SubjectKeyId : caPubSha256 [ : ] , }
// Self-signed: the template serves as both subject and issuer.
caCertBytes , err = x509 . CreateCertificate ( rand . Reader , caCertTpl , caCertTpl , caPubKey , caPrivKey )
if err != nil { return nil , nil , nil , err
}
return caCertTpl , caCertBytes , caPrivKey , nil
}
|
MakeCACertKeyPair generates asymmetric private key and certificate for CA suitable for signing certificates
|
75
|
// MakeSubjCertKeyPair generates a private key and a certificate for a
// subject, signed by the given CA, suitable for securing TLS
// communication. The cert is valid for localhost/127.0.0.1 and for both
// client and server authentication.
func ( u CertTestUtil ) MakeSubjCertKeyPair ( cn , ou string , keyValidPeriod time . Duration , caCertTpl * x509 . Certificate , caPrivKey * rsa . PrivateKey ) ( subjCertBytes [ ] byte , subjPrivKey * rsa . PrivateKey , err error ) { subjPrivKey , err = rsa . GenerateKey ( rand . Reader , keyBitsDefault )
if err != nil { return nil , nil , err
}
subjPubBytes , err := x509 . MarshalPKIXPublicKey ( subjPrivKey . Public ( ) )
if err != nil { return nil , nil , err
}
// SHA-256 of the DER-encoded public key becomes the SubjectKeyId.
subjPubSha256 := sha256 . Sum256 ( subjPubBytes )
subjCertTpl := x509 . Certificate { SignatureAlgorithm : defaultSignatureAlgorithm , PublicKeyAlgorithm : defaultPublicKeyAlgorithm , Version : 3 , SerialNumber : big . NewInt ( 1 ) , Subject : pkix . Name { OrganizationalUnit : [ ] string { ou } , CommonName : cn , } , NotBefore : time . Now ( ) , NotAfter : time . Now ( ) . Add ( keyValidPeriod ) , KeyUsage : x509 . KeyUsageDigitalSignature | x509 . KeyUsageKeyEncipherment | x509 . KeyUsageDataEncipherment | x509 . KeyUsageKeyAgreement , ExtKeyUsage : [ ] x509 . ExtKeyUsage { x509 . ExtKeyUsageClientAuth , x509 . ExtKeyUsageServerAuth } , SubjectKeyId : subjPubSha256 [ : ] , }
// Hard-coded SANs: certificates are only valid for local connections.
subjCertTpl . DNSNames = [ ] string { "localhost" }
subjCertTpl . IPAddresses = [ ] net . IP { net . ParseIP ( "127.0.0.1" ) }
// Signed by the CA key against the CA template (not self-signed).
subjCertBytes , err = x509 . CreateCertificate ( rand . Reader , & subjCertTpl , caCertTpl , subjPrivKey . Public ( ) , caPrivKey )
return subjCertBytes , subjPrivKey , err
}
|
MakeSubjCertKeyPair generates a private key and a certificate for subject suitable for securing TLS communication
|
76
|
// GetConfigTree converts the collection node's config data from the wmap
// into a cdata.ConfigDataTree keyed by namespace.
func (c *CollectWorkflowMapNode) GetConfigTree() (*cdata.ConfigDataTree, error) {
	tree := cdata.NewTree()
	for nsKey, cmap := range c.Config {
		node, err := configtoConfigDataNode(cmap, nsKey)
		if err != nil {
			return nil, err
		}
		// [1:] drops the leading empty element produced by the "/" prefix.
		tree.Add(strings.Split(nsKey, "/")[1:], node)
	}
	return tree, nil
}
|
GetConfigTree converts config data for collection node in wmap into a proper cdata . ConfigDataTree
|
77
|
// mergeCliOptions merges command-line options (name, deadline,
// max-failures) into the current task and then applies any
// schedule-related options from the context.
func (t *task) mergeCliOptions(ctx *cli.Context) error {
	if name := ctx.String("name"); ctx.IsSet("name") || name != "" {
		t.Name = name
	}
	if deadline := ctx.String("deadline"); ctx.IsSet("deadline") || deadline != "" {
		t.Deadline = deadline
	}
	if maxFailuresStr := ctx.String("max-failures"); ctx.IsSet("max-failures") || maxFailuresStr != "" {
		maxFailures, err := stringValToInt(maxFailuresStr)
		if err != nil {
			return err
		}
		t.MaxFailures = maxFailures
	}
	return t.setScheduleFromCliOptions(ctx)
}
|
merge the command - line options into the current task
|
78
|
// validateSchema validates the cfg document against the given JSON schema
// and returns any problems as SnapErrors; nil means the document is valid.
func (r *schemaValidatorType) validateSchema(schema, cfg string) []serror.SnapError {
	result, err := gojsonschema.Validate(
		gojsonschema.NewStringLoader(schema),
		gojsonschema.NewStringLoader(cfg),
	)
	if err != nil {
		// The validation itself could not run (e.g. malformed schema).
		return []serror.SnapError{serror.New(err)}
	}
	if result.Valid() {
		return nil
	}
	var serrors []serror.SnapError
	for _, verr := range result.Errors() {
		serr := serror.New(errors.New("Validate schema error"))
		serr.SetFields(map[string]interface{}{
			"value":       verr.Value(),
			"context":     verr.Context().String("::"),
			"description": verr.Description(),
		})
		serrors = append(serrors, serr)
	}
	return serrors
}
|
validateSchema validates a configuration document against a JSON schema and returns any validation problems as SnapErrors
|
79
|
// newTask creates a Task for the given schedule and workflow, wiring in
// the work manager, metrics manager and event emitter, then applies any
// TaskOptions. Returns an error when task clients cannot be created.
func newTask ( s schedule . Schedule , wf * schedulerWorkflow , m * workManager , mm managesMetrics , emitter gomit . Emitter , opts ... core . TaskOption ) ( * task , error ) { taskID := uuid . New ( )
name := fmt . Sprintf ( "Task-%s" , taskID )
wf . eventEmitter = emitter
mgrs := newManagers ( mm )
err := createTaskClients ( & mgrs , wf )
if err != nil { return nil , err
}
// Type assertion's ok flag records whether this is a streaming schedule.
_ , stream := s . ( * schedule . StreamingSchedule )
task := & task { id : taskID , name : name , schResponseChan : make ( chan schedule . Response ) , schedule : s , state : core . TaskStopped , creationTime : time . Now ( ) , workflow : wf , manager : m , metricsManager : mm , deadlineDuration : DefaultDeadlineDuration , stopOnFailure : DefaultStopOnFailure , eventEmitter : emitter , RemoteManagers : mgrs , isStream : stream , }
// Options may override the defaults set in the literal above.
for _ , opt := range opts { opt ( task )
}
return task , nil
}
|
NewTask creates a Task
|
80
|
// Spin starts the task running in its own goroutine while it waits for its
// schedule. Streaming tasks always launch stream(); interval tasks launch
// spin() only from the Stopped or Ended states.
func (t *task) Spin() {
	t.Lock()
	defer t.Unlock()
	if t.isStream {
		t.state = core.TaskSpinning
		t.killChan = make(chan struct{})
		go t.stream()
		return
	}
	t.lastFireTime = time.Time{}
	if t.state != core.TaskStopped && t.state != core.TaskEnded {
		return
	}
	t.state = core.TaskSpinning
	t.killChan = make(chan struct{})
	go t.spin()
}
|
Spin will start a task spinning in its own routine while it waits for its schedule .
|
81
|
// stream runs the streaming-collection loop for the task: it (re)opens a
// metrics stream, forwards batches to the workflow, and tracks consecutive
// failures, disabling the task when the stopOnFailure threshold is hit.
// It exits on a kill signal or when the task is disabled.
func ( t * task ) stream ( ) { var consecutiveFailures int
// Back-off delay used before retrying a failed or broken stream.
resetTime := time . Second * 3
for { metricsChan , errChan , err := t . metricsManager . StreamMetrics ( t . id , t . workflow . tags , t . maxCollectDuration , t . maxMetricsBuffer )
if err != nil { consecutiveFailures ++
// stopOnFailure < 0 means "never disable"; otherwise disable on limit.
if t . stopOnFailure >= 0 && consecutiveFailures >= t . stopOnFailure { taskLogger . WithFields ( log . Fields { "_block" : "stream" , "task-id" : t . id , "task-name" : t . name , "consecutive failures" : consecutiveFailures , "error" : t . lastFailureMessage , } ) . Error ( ErrTaskDisabledOnFailures )
t . disable ( t . lastFailureMessage )
return
}
time . Sleep ( resetTime )
continue
} else { consecutiveFailures = 0
}
done := false
// Inner loop drains the current stream until it is closed or broken.
for ! done { if errChan == nil { break
}
select { case <- t . killChan : t . Lock ( )
t . state = core . TaskStopped
t . Unlock ( )
done = true
event := new ( scheduler_event . TaskStoppedEvent )
event . TaskID = t . id
// Deferred so the stopped event fires as the goroutine exits.
defer t . eventEmitter . Emit ( event )
return
case mts , ok := <- metricsChan : if ! ok { metricsChan = nil
break
}
if len ( mts ) == 0 { continue
}
t . hitCount ++
consecutiveFailures = 0
t . workflow . StreamStart ( t , mts )
case err := <- errChan : taskLogger . WithFields ( log . Fields { "_block" : "stream" , "task-id" : t . id , "task-name" : t . name , } ) . Error ( "Error: " + err . Error ( ) )
consecutiveFailures ++
// A broken connection triggers a back-off and a fresh stream.
if err . Error ( ) == "connection broken" { time . Sleep ( resetTime )
done = true
}
if t . stopOnFailure >= 0 && consecutiveFailures >= t . stopOnFailure { taskLogger . WithFields ( log . Fields { "_block" : "stream" , "task-id" : t . id , "task-name" : t . name , "consecutive failures" : consecutiveFailures , "error" : t . lastFailureMessage , } ) . Error ( ErrTaskDisabledOnFailures )
t . disable ( t . lastFailureMessage )
return
}
}
}
}
}
|
stream runs the streaming collection loop for a task, forwarding metrics to the workflow and handling errors, retries, and kill signals
|
82
|
// UnsubscribePlugins groups the task's dependencies by the workflow node
// they live in and unsubscribes each group from its manager, collecting
// and logging any errors.
func ( t * task ) UnsubscribePlugins ( ) [ ] serror . SnapError { depGroups := getWorkflowPlugins ( t . workflow . processNodes , t . workflow . publishNodes , t . workflow . metrics )
var errs [ ] serror . SnapError
for k := range depGroups { event := & scheduler_event . PluginsUnsubscribedEvent { TaskID : t . ID ( ) , Plugins : depGroups [ k ] . subscribedPlugins , }
// Deferred in a loop on purpose: all events fire after every group has
// been unsubscribed, when this function returns.
defer t . eventEmitter . Emit ( event )
mgr , err := t . RemoteManagers . Get ( k )
if err != nil { errs = append ( errs , serror . New ( err ) )
} else { uerrs := mgr . UnsubscribeDeps ( t . ID ( ) )
if len ( uerrs ) > 0 { errs = append ( errs , uerrs ... )
}
}
}
for _ , err := range errs { taskLogger . WithFields ( log . Fields { "_block" : "UnsubscribePlugins" , "task-id" : t . id , "task-name" : t . name , "task-state" : t . state , } ) . Error ( err )
}
return errs
}
|
UnsubscribePlugins groups task dependencies by the node they live in workflow and unsubscribe them
|
83
|
// SubscribePlugins groups the task's dependencies by the workflow node
// they live in and subscribes each group. On any subscription error it
// rolls back by unsubscribing every group subscribed so far, then returns
// the accumulated errors.
func ( t * task ) SubscribePlugins ( ) ( [ ] string , [ ] serror . SnapError ) { depGroups := getWorkflowPlugins ( t . workflow . processNodes , t . workflow . publishNodes , t . workflow . metrics )
var subbedDeps [ ] string
for k := range depGroups { var errs [ ] serror . SnapError
mgr , err := t . RemoteManagers . Get ( k )
if err != nil { errs = append ( errs , serror . New ( err ) )
} else { errs = mgr . SubscribeDeps ( t . ID ( ) , depGroups [ k ] . requestedMetrics , depGroups [ k ] . subscribedPlugins , t . workflow . configTree )
}
// Rollback: undo every subscription made before this failure.
if len ( errs ) > 0 { for _ , key := range subbedDeps { mgr , err := t . RemoteManagers . Get ( key )
if err != nil { errs = append ( errs , serror . New ( err ) )
} else { uerrs := mgr . UnsubscribeDeps ( t . ID ( ) )
errs = append ( errs , uerrs ... )
}
}
return nil , errs
}
subbedDeps = append ( subbedDeps , k )
}
return subbedDeps , nil
}
|
SubscribePlugins groups task dependencies by the node they live in workflow and subscribe them . If there are errors with subscribing any deps manage unsubscribing all other deps that may have already been subscribed and then return the errors .
|
84
|
// Enable transitions the task from Disabled to Stopped; any other state
// yields ErrTaskNotDisabled.
func (t *task) Enable() error {
	t.Lock()
	defer t.Unlock()
	if t.state == core.TaskDisabled {
		t.state = core.TaskStopped
		return nil
	}
	return ErrTaskNotDisabled
}
|
Enable changes the state from Disabled to Stopped
|
85
|
// disable marks the task disabled and emits a TaskDisabledEvent recording
// why it was disabled.
func (t *task) disable(failureMsg string) {
	t.Lock()
	t.state = core.TaskDisabled
	t.Unlock()
	event := new(scheduler_event.TaskDisabledEvent)
	event.TaskID = t.id
	event.Why = fmt.Sprintf("Task disabled with error: %s", failureMsg)
	// Emit directly: the previous defer on the final statement was a no-op.
	t.eventEmitter.Emit(event)
}
|
disable proceeds disabling a task which consists of changing task state to disabled and emitting an appropriate event
|
86
|
// RecordFailure increments the failed-run counter and remembers the time
// of the run and the message of the last error in the batch.
func (t *task) RecordFailure(e []error) {
	t.failureMutex.Lock()
	defer t.failureMutex.Unlock()
	t.failedRuns++
	t.lastFailureTime = t.lastFireTime
	// Guard against an empty slice: e[len(e)-1] previously panicked when a
	// caller passed no errors.
	if len(e) > 0 {
		t.lastFailureMessage = e[len(e)-1].Error()
	}
}
|
RecordFailure updates the failed runs and last failure properties
|
87
|
// Get returns the task with the given id, or nil when no such task exists.
func (t *taskCollection) Get(id string) *task {
	t.Lock()
	defer t.Unlock()
	// A missing key yields the zero value for *task, which is nil.
	return t.table[id]
}
|
Get given a task id returns a Task or nil if not found
|
88
|
// add inserts the task into the collection. ErrTaskHasAlreadyBeenAdded is
// logged and returned when a task with the same id already exists.
func (t *taskCollection) add(task *task) error {
	t.Lock()
	defer t.Unlock()
	if _, exists := t.table[task.id]; exists {
		taskLogger.WithFields(log.Fields{
			"_module": "scheduler-taskCollection",
			"_block":  "add",
			"task id": task.id,
		}).Error(ErrTaskHasAlreadyBeenAdded.Error())
		return ErrTaskHasAlreadyBeenAdded
	}
	t.table[task.id] = task
	return nil
}
|
Add given a reference to a task adds it to the collection of tasks . An error is returned if the task already exists in the collection .
|
89
|
// remove deletes the given task from the collection. The task must be
// stopped, disabled or ended, otherwise ErrTaskNotStopped is returned;
// ErrTaskNotFound is returned when the task is not in the collection.
func (t *taskCollection) remove(task *task) error {
	t.Lock()
	defer t.Unlock()
	if _, ok := t.table[task.id]; !ok {
		taskLogger.WithFields(log.Fields{
			"_block":  "remove",
			"task id": task.id,
		}).Error(ErrTaskNotFound)
		return ErrTaskNotFound
	}
	switch task.state {
	case core.TaskStopped, core.TaskDisabled, core.TaskEnded:
		delete(t.table, task.id)
		return nil
	}
	taskLogger.WithFields(log.Fields{
		"_block":  "remove",
		"task id": task.id,
	}).Error(ErrTaskNotStopped)
	return ErrTaskNotStopped
}
|
remove will remove a given task from tasks . The task must be stopped . Can return errors ErrTaskNotFound and ErrTaskNotStopped .
|
90
|
// Table returns a shallow copy of the task table so callers can iterate
// without holding the collection's lock.
func (t *taskCollection) Table() map[string]*task {
	t.Lock()
	defer t.Unlock()
	copied := make(map[string]*task, len(t.table))
	for id, tsk := range t.table {
		copied[id] = tsk
	}
	return copied
}
|
Table returns a copy of the taskCollection
|
91
|
// createTaskClients walks the workflow's process and publish nodes and
// creates clients in the task's remoteManagers so that nodes requiring
// proxied requests can make them.
func createTaskClients ( mgrs * managers , wf * schedulerWorkflow ) error { return walkWorkflow ( wf . processNodes , wf . publishNodes , mgrs )
}
|
createTaskClients walks the workflowmap and creates clients for this task remoteManagers so that nodes that require proxy request can make them .
|
92
|
// Select chooses an available plugin using the sticky strategy: reuse the
// plugin already bound to the task when present, otherwise pick a new one.
func (s *sticky) Select(aps []AvailablePlugin, taskID string) (AvailablePlugin, error) {
	bound, ok := s.plugins[taskID]
	if ok && bound != nil {
		return bound, nil
	}
	return s.selectPlugin(aps, taskID)
}
|
Select selects an available plugin using the sticky plugin strategy .
|
93
|
// OptEnableRunnerTLS returns a runner option that installs the given gRPC
// security settings, enabling TLS for plugin communication.
func OptEnableRunnerTLS ( grpcSecurity client . GRPCSecurity ) pluginRunnerOpt { return func ( r * runner ) { r . grpcSecurity = grpcSecurity
}
}
|
OptEnableRunnerTLS enables the TLS configuration in runner
|
94
|
// Start registers the runner as an event handler with every delegate and
// starts the plugin monitor. At least one delegate must have been added.
func (r *runner) Start() error {
	if len(r.delegates) == 0 {
		return errors.New("No delegates added before called Start()")
	}
	for _, delegate := range r.delegates {
		if err := delegate.RegisterHandler(HandlerRegistrationName, r); err != nil {
			return err
		}
	}
	r.monitor.Start(r.availablePlugins)
	runnerLog.WithFields(log.Fields{
		"_block": "start",
	}).Debug("started")
	return nil
}
|
Begins handling events and managing available plugins
|
95
|
// Stop halts the monitor and unregisters the runner's event handler from
// every delegate, collecting any unregistration errors.
func (r *runner) Stop() []error {
	var errs []error
	r.monitor.Stop()
	for _, del := range r.delegates {
		if e := del.UnregisterHandler(HandlerRegistrationName); e != nil {
			errs = append(errs, e)
		}
	}
	// The log field previously read "start-plugin" inside Stop — a
	// copy/paste error; also dropped the no-op defer on the final statement.
	runnerLog.WithFields(log.Fields{
		"_block": "stop",
	}).Debug("stopped")
	return errs
}
|
Stop gracefully stops event handling and all plugins.
|
96
|
// HandleGomitEvent reacts to control events: it kills and (up to
// MaxPluginRestartCount, or without limit when that is -1) restarts dead
// available plugins, and processes plugin unsubscription events. All other
// events are logged and ignored.
func (r *runner) HandleGomitEvent(e gomit.Event) {
	switch v := e.Body.(type) {
	case *control_event.DeadAvailablePluginEvent:
		runnerLog.WithFields(log.Fields{
			"_block":  "handle-events",
			"event":   v.Namespace(),
			"aplugin": v.String,
		}).Warning("handling dead available plugin event")
		pool, err := r.availablePlugins.getPool(v.Key)
		if err != nil {
			runnerLog.WithFields(log.Fields{
				"_block":  "handle-events",
				"aplugin": v.String,
			}).Error(err.Error())
			return
		}
		// BUG FIX: pool.Eligible() was previously called even when pool was
		// nil (only Kill was guarded), causing a nil-pointer panic.
		if pool == nil {
			return
		}
		pool.Kill(v.Id, "plugin dead")
		if pool.Eligible() {
			if pool.RestartCount() < MaxPluginRestartCount || MaxPluginRestartCount == -1 {
				if e := r.restartPlugin(v.Key); e != nil {
					runnerLog.WithFields(log.Fields{
						"_block":  "handle-events",
						"aplugin": v.String,
					}).Error(e.Error())
					return
				}
				pool.IncRestartCount()
				runnerLog.WithFields(log.Fields{
					"_block":        "handle-events",
					"aplugin":       v.String,
					"restart-count": pool.RestartCount(),
				}).Warning("plugin restarted")
				r.emitter.Emit(&control_event.RestartedAvailablePluginEvent{
					Id:      v.Id,
					Name:    v.Name,
					Version: v.Version,
					Key:     v.Key,
					Type:    v.Type,
				})
			} else {
				runnerLog.WithFields(log.Fields{
					"_block":  "handle-events",
					"aplugin": v.String,
				}).Warning("plugin disabled due to exceeding restart limit: ", MaxPluginRestartCount)
				r.emitter.Emit(&control_event.MaxPluginRestartsExceededEvent{
					Id:      v.Id,
					Name:    v.Name,
					Version: v.Version,
					Key:     v.Key,
					Type:    v.Type,
				})
			}
		}
	case *control_event.PluginUnsubscriptionEvent:
		runnerLog.WithFields(log.Fields{
			"_block":         "subscribe-pool",
			"event":          v.Namespace(),
			"plugin-name":    v.PluginName,
			"plugin-version": v.PluginVersion,
			"plugin-type":    core.PluginType(v.PluginType).String(),
		}).Debug("handling plugin unsubscription event")
		if err := r.handleUnsubscription(core.PluginType(v.PluginType).String(), v.PluginName, v.PluginVersion, v.TaskId); err != nil {
			return
		}
	default:
		runnerLog.WithFields(log.Fields{
			"_block": "handle-events",
			"event":  v.Namespace(),
		}).Info("Nothing to do for this event")
	}
}
|
HandleGomitEvent handles control events, restarting dead available plugins and processing plugin unsubscription events
|
97
|
// Start launches the queue handling loop if the queue is currently
// stopped; starting a running queue is a no-op.
func (q *queue) Start() {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.status != queueStopped {
		return
	}
	q.status = queueRunning
	go q.start()
}
|
begins the queue handling loop
|
98
|
// Stop closes the kill channel, causing the handling loop to exit, and
// marks the queue stopped. Stopping an already-stopped queue is a no-op.
func (q *queue) Stop() {
	q.mutex.Lock()
	defer q.mutex.Unlock()
	if q.status == queueStopped {
		return
	}
	close(q.kill)
	q.status = queueStopped
}
|
Stop closes both Err and Event channels and causes the handling loop to exit .
|
99
|
// IsUri reports whether the string is a valid URL with an http or https
// scheme (the prefix check accepts both, since "https" starts with "http").
func IsUri(url string) bool {
	// Return the condition directly instead of if !cond { return false }.
	return govalidator.IsURL(url) && strings.HasPrefix(url, "http")
}
|
Checks if string is URL
|
End of preview. Expand
in Data Studio
No dataset card yet
- Downloads last month
- -