_id stringlengths 2 7 | title stringlengths 1 118 | partition stringclasses 3 values | text stringlengths 52 85.5k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q15200 | SetListenAddress | train | func (p *Prometheus) SetListenAddress(address string) {
// Record the address of a dedicated metrics listener. A non-empty address
// causes metrics to be served from their own gin router (created here)
// instead of the application's engine.
p.listenAddress = address
if p.listenAddress != "" {
p.router = gin.Default()
}
} | go | {
"resource": ""
} |
q15201 | SetMetricsPath | train | func (p *Prometheus) SetMetricsPath(e *gin.Engine) {
// Register the metrics endpoint: on the dedicated router (and start its
// server) when a listen address was configured, otherwise on the supplied
// application engine.
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, prometheusHandler())
p.runServer()
} else {
e.GET(p.MetricsPath, prometheusHandler())
}
} | go | {
"resource": ""
} |
q15202 | SetMetricsPathWithAuth | train | func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) {
// Same as SetMetricsPath, but the endpoint is protected with HTTP basic
// auth using the supplied account map.
if p.listenAddress != "" {
p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
p.runServer()
} else {
e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
}
} | go | {
"resource": ""
} |
q15203 | NewMetric | train | func NewMetric(m *Metric, subsystem string) prometheus.Collector {
// NewMetric builds a prometheus.Collector from the metric description m,
// dispatching on m.Type ("counter", "counter_vec", "gauge", ... "summary").
// For the *_vec variants, m.Args supplies the label names.
// NOTE(review): an unrecognized m.Type returns a nil Collector; callers
// must be prepared for that.
var metric prometheus.Collector
switch m.Type {
case "counter_vec":
metric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "counter":
metric = prometheus.NewCounter(
prometheus.CounterOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "gauge_vec":
metric = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "gauge":
metric = prometheus.NewGauge(
prometheus.GaugeOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "histogram_vec":
metric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "histogram":
metric = prometheus.NewHistogram(
prometheus.HistogramOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
case "summary_vec":
metric = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
m.Args,
)
case "summary":
metric = prometheus.NewSummary(
prometheus.SummaryOpts{
Subsystem: subsystem,
Name: m.Name,
Help: m.Description,
},
)
}
return metric
} | go | {
"resource": ""
} |
q15204 | Use | train | func (p *Prometheus) Use(e *gin.Engine) {
// Install the instrumentation middleware on e and expose the metrics path.
e.Use(p.HandlerFunc())
p.SetMetricsPath(e)
} | go | {
"resource": ""
} |
q15205 | UseWithAuth | train | func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) {
// Same as Use, but the metrics endpoint requires basic auth.
e.Use(p.HandlerFunc())
p.SetMetricsPathWithAuth(e, accounts)
} | go | {
"resource": ""
} |
q15206 | HandlerFunc | train | func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
// HandlerFunc returns the gin middleware that records request count,
// duration, and request/response sizes for every request except the
// metrics endpoint itself.
return func(c *gin.Context) {
// Skip instrumentation for the scrape endpoint.
if c.Request.URL.String() == p.MetricsPath {
c.Next()
return
}
start := time.Now()
// Request size must be captured before the handler may consume the body.
reqSz := computeApproximateRequestSize(c.Request)
c.Next()
status := strconv.Itoa(c.Writer.Status())
elapsed := float64(time.Since(start)) / float64(time.Second)
resSz := float64(c.Writer.Size())
p.reqDur.Observe(elapsed)
url := p.ReqCntURLLabelMappingFn(c)
// jlambert Oct 2018 - sidecar specific mod
// Optionally take the URL label from a value stored in the gin context.
if len(p.URLLabelFromContext) > 0 {
u, found := c.Get(p.URLLabelFromContext)
if !found {
u = "unknown"
}
url = u.(string)
}
p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
p.reqSz.Observe(float64(reqSz))
p.resSz.Observe(resSz)
}
} | go | {
"resource": ""
} |
q15207 | findBlice | train | func (m *Matcher) findBlice(b []byte) *node {
// findBlice walks the trie from the root following the bytes of b and
// returns the node reached after consuming all of b, or nil if the path
// does not exist in the trie.
n := &m.trie[0]
for n != nil && len(b) > 0 {
n = n.child[int(b[0])]
b = b[1:]
}
return n
} | go | {
"resource": ""
} |
q15208 | buildTrie | train | func (m *Matcher) buildTrie(dictionary [][]byte) {
// buildTrie constructs the Aho-Corasick automaton for the dictionary:
// first the plain trie of all entries, then the fail and suffix links via
// a breadth-first pass, and finally the per-node fail transition table.
// Work out the maximum size for the trie (all dictionary entries
// are distinct plus the root). This is used to preallocate memory
// for it.
max := 1
for _, blice := range dictionary {
max += len(blice)
}
m.trie = make([]node, max)
// Calling this and ignoring its argument simply allocates
// m.trie[0] which will be the root element
m.getFreeNode()
// This loop builds the nodes in the trie by following through
// each dictionary entry building the children pointers.
for i, blice := range dictionary {
n := m.root
var path []byte
for _, b := range blice {
path = append(path, b)
c := n.child[int(b)]
if c == nil {
c = m.getFreeNode()
n.child[int(b)] = c
// Each node remembers the full byte path leading to it so fail
// and suffix links can be computed from its proper suffixes.
c.b = make([]byte, len(path))
copy(c.b, path)
// Nodes directly under the root node will have the
// root as their fail point as there are no suffixes
// possible.
if len(path) == 1 {
c.fail = m.root
}
c.suffix = m.root
}
n = c
}
// The last value of n points to the node representing a
// dictionary entry
n.output = true
n.index = i
}
// Breadth-first traversal assigning fail links (longest proper suffix
// present in the trie) and suffix links (longest proper suffix that is
// itself a dictionary entry).
l := new(list.List)
l.PushBack(m.root)
for l.Len() > 0 {
n := l.Remove(l.Front()).(*node)
for i := 0; i < 256; i++ {
c := n.child[i]
if c != nil {
l.PushBack(c)
for j := 1; j < len(c.b); j++ {
c.fail = m.findBlice(c.b[j:])
if c.fail != nil {
break
}
}
if c.fail == nil {
c.fail = m.root
}
for j := 1; j < len(c.b); j++ {
s := m.findBlice(c.b[j:])
if s != nil && s.output {
c.suffix = s
break
}
}
}
}
}
// Precompute, for every node and every possible byte, the node reached by
// repeatedly following fail links until a child exists (or the root).
for i := 0; i < m.extent; i++ {
for c := 0; c < 256; c++ {
n := &m.trie[i]
for n.child[c] == nil && !n.root {
n = n.fail
}
m.trie[i].fails[c] = n
}
}
// Trim the preallocated slice down to the nodes actually used.
m.trie = m.trie[:m.extent]
} | go | {
"resource": ""
} |
q15209 | NewMatcher | train | func NewMatcher(dictionary [][]byte) *Matcher {
// NewMatcher builds an Aho-Corasick matcher for the given dictionary of
// byte slices.
m := new(Matcher)
m.buildTrie(dictionary)
return m
} | go | {
"resource": ""
} |
q15210 | Match | train | func (m *Matcher) Match(in []byte) []int {
// Match scans in and returns the indexes of all dictionary entries found.
// Each entry is reported at most once per call; the per-call m.counter
// stamped onto nodes provides that deduplication without clearing state.
// NOTE(review): the counter makes this method unsafe for concurrent use
// on the same Matcher — confirm callers serialize access.
m.counter += 1
var hits []int
n := m.root
for _, b := range in {
c := int(b)
// Follow the precomputed fail transition when there is no direct child.
if !n.root && n.child[c] == nil {
n = n.fails[c]
}
if n.child[c] != nil {
f := n.child[c]
n = f
if f.output && f.counter != m.counter {
hits = append(hits, f.index)
f.counter = m.counter
}
// Also report every dictionary entry that is a proper suffix of the
// current match, via the suffix links.
for !f.suffix.root {
f = f.suffix
if f.counter != m.counter {
hits = append(hits, f.index)
f.counter = m.counter
} else {
// There's no point working our way up the
// suffixes if it's been done before for this call
// to Match. The matches are already in hits.
break
}
}
}
}
return hits
} | go | {
"resource": ""
} |
q15211 | stopHandler | train | func stopHandler(stopurl string, cancel bool, wrapped fetchbot.Handler) fetchbot.Handler {
// stopHandler wraps a handler and stops the queue when stopurl is fetched:
// Cancel (drop pending work) when cancel is true, otherwise Close (drain).
// All other responses are delegated to the wrapped handler.
return fetchbot.HandlerFunc(func(ctx *fetchbot.Context, res *http.Response, err error) {
if ctx.Cmd.URL().String() == stopurl {
fmt.Printf(">>>>> STOP URL %s\n", ctx.Cmd.URL())
// generally not a good idea to stop/block from a handler goroutine
// so do it in a separate goroutine
go func() {
if cancel {
ctx.Q.Cancel()
} else {
ctx.Q.Close()
}
}()
return
}
wrapped.Handle(ctx, res, err)
})
} | go | {
"resource": ""
} |
q15212 | logHandler | train | func logHandler(wrapped fetchbot.Handler) fetchbot.Handler {
// logHandler wraps a handler, printing a one-line summary of each
// successful response before delegating to the wrapped handler.
return fetchbot.HandlerFunc(func(ctx *fetchbot.Context, res *http.Response, err error) {
if err == nil {
fmt.Printf("[%d] %s %s - %s\n", res.StatusCode, ctx.Cmd.Method(), ctx.Cmd.URL(), res.Header.Get("Content-Type"))
}
wrapped.Handle(ctx, res, err)
})
} | go | {
"resource": ""
} |
q15213 | NewHandlerCmd | train | func NewHandlerCmd(method, rawURL string, fn func(*Context, *http.Response, error)) (*HandlerCmd, error) {
// NewHandlerCmd builds a command that carries its own handler fn, parsing
// rawURL first; a parse failure is returned to the caller.
parsedURL, err := url.Parse(rawURL)
if err != nil {
return nil, err
}
return &HandlerCmd{&Cmd{parsedURL, method}, HandlerFunc(fn)}, nil
} | go | {
"resource": ""
} |
q15214 | Handle | train | func (h HandlerFunc) Handle(ctx *Context, res *http.Response, err error) {
// Handle invokes h itself, letting a plain function satisfy Handler.
h(ctx, res, err)
} | go | {
"resource": ""
} |
q15215 | NewMux | train | func NewMux() *Mux {
// NewMux returns an empty multiplexer whose default handler does nothing.
return &Mux{
// Default handler is a no-op
DefaultHandler: HandlerFunc(func(ctx *Context, res *http.Response, err error) {}),
errm: make(map[error]Handler),
res: make(map[*ResponseMatcher]bool),
}
} | go | {
"resource": ""
} |
q15216 | Handle | train | func (mux *Mux) Handle(ctx *Context, res *http.Response, err error) {
// Handle dispatches to the registered handler: on error, the handler
// registered for that exact error value, then the nil (catch-all) error
// handler; on success, the response matcher with the highest specificity.
// Falls through to DefaultHandler when nothing matches.
mux.mu.RLock()
defer mux.mu.RUnlock()
if err != nil {
// Find a matching error handler
if h, ok := mux.errm[err]; ok {
h.Handle(ctx, res, err)
return
}
if h, ok := mux.errm[nil]; ok {
h.Handle(ctx, res, err)
return
}
} else {
// Find a matching response handler
var h Handler
var n = -1
for r := range mux.res {
// Keep the matcher with the longest matched path prefix (cnt).
if ok, cnt := r.match(res); ok {
if cnt > n {
h, n = r.h, cnt
}
}
}
if h != nil {
h.Handle(ctx, res, err)
return
}
}
mux.DefaultHandler.Handle(ctx, res, err)
} | go | {
"resource": ""
} |
q15217 | HandleError | train | func (mux *Mux) HandleError(err error, h Handler) {
// HandleError registers h for the given error value; a nil err registers
// the catch-all error handler.
mux.mu.Lock()
defer mux.mu.Unlock()
mux.errm[err] = h
} | go | {
"resource": ""
} |
q15218 | match | train | func (r *ResponseMatcher) match(res *http.Response) (bool, int) {
// match reports whether res satisfies every configured criterion. The int
// is a specificity score used to rank competing matchers: the length of
// the matched path prefix, or 0 when no path criterion is set.
if r.method != "" {
if r.method != res.Request.Method {
return false, 0
}
}
if r.contentType != "" {
if r.contentType != getContentType(res.Header.Get("Content-Type")) {
return false, 0
}
}
if r.minStatus != 0 || r.maxStatus != 0 {
if res.StatusCode < r.minStatus || res.StatusCode > r.maxStatus {
return false, 0
}
}
if r.scheme != "" {
if res.Request.URL.Scheme != r.scheme {
return false, 0
}
}
if r.host != "" {
if res.Request.URL.Host != r.host {
return false, 0
}
}
if r.predicate != nil {
if !r.predicate(res) {
return false, 0
}
}
// Path is a prefix match and contributes its length as specificity.
if r.path != "" {
if strings.HasPrefix(res.Request.URL.Path, r.path) {
return true, len(r.path)
}
return false, 0
}
return true, 0
} | go | {
"resource": ""
} |
q15219 | Status | train | func (r *ResponseMatcher) Status(code int) *ResponseMatcher {
// Status restricts the matcher to a single status code (a degenerate
// range of one value). Returns the receiver for chaining.
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.minStatus = code
r.maxStatus = code
return r
} | go | {
"resource": ""
} |
q15220 | StatusRange | train | func (r *ResponseMatcher) StatusRange(min, max int) *ResponseMatcher {
// StatusRange restricts the matcher to the inclusive status-code range
// [min, max], swapping the bounds if given in reverse order.
if min > max {
min, max = max, min
}
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.minStatus = min
r.maxStatus = max
return r
} | go | {
"resource": ""
} |
q15221 | Scheme | train | func (r *ResponseMatcher) Scheme(scheme string) *ResponseMatcher {
// Scheme restricts the matcher to the given URL scheme.
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.scheme = scheme
return r
} | go | {
"resource": ""
} |
q15222 | Host | train | func (r *ResponseMatcher) Host(host string) *ResponseMatcher {
// Host restricts the matcher to the given URL host.
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.host = host
return r
} | go | {
"resource": ""
} |
q15223 | Path | train | func (r *ResponseMatcher) Path(p string) *ResponseMatcher {
// Path restricts the matcher to URLs whose path starts with p (prefix
// match; see match for how the prefix length ranks competing matchers).
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.path = p
return r
} | go | {
"resource": ""
} |
q15224 | Custom | train | func (r *ResponseMatcher) Custom(predicate func(*http.Response) bool) *ResponseMatcher {
// Custom adds an arbitrary predicate that must return true for a match.
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.predicate = predicate
return r
} | go | {
"resource": ""
} |
q15225 | Handler | train | func (r *ResponseMatcher) Handler(h Handler) *ResponseMatcher {
// Handler assigns the handler invoked when this matcher wins, registers
// the matcher with its mux, and returns the receiver for chaining.
r.mux.mu.Lock()
defer r.mux.mu.Unlock()
r.h = h
// Unconditional set: writing true is idempotent, so the previous
// existence check before the assignment was redundant.
r.mux.res[r] = true
return r
} | go | {
"resource": ""
} |
q15226 | New | train | func New(h Handler) *Fetcher {
// New returns a Fetcher with package defaults for the crawl delay, HTTP
// client, user agent, and worker idle TTL; h handles every response.
// The debug channel is buffered (size 1) so sends can be non-blocking.
return &Fetcher{
Handler: h,
CrawlDelay: DefaultCrawlDelay,
HttpClient: http.DefaultClient,
UserAgent: DefaultUserAgent,
WorkerIdleTTL: DefaultWorkerIdleTTL,
dbg: make(chan *DebugInfo, 1),
}
} | go | {
"resource": ""
} |
q15227 | Close | train | func (q *Queue) Close() error {
// Close shuts the queue down: it signals closure, nudges processQueue
// with a nil command so it observes the signal, waits for pending work to
// drain, then unblocks any Block() callers. Closing twice is a no-op.
// NOTE(review): two goroutines calling Close concurrently could both pass
// the default branch and double-close q.closed (a panic) — confirm
// callers serialize Close/Cancel.
// Make sure it is not already closed, as this is a run-time panic
select {
case <-q.closed:
// Already closed, no-op
return nil
default:
// Close the signal-channel
close(q.closed)
// Send a nil Command to make sure the processQueue method sees the close signal.
q.ch <- nil
// Wait for the Fetcher to drain.
q.wg.Wait()
// Unblock any callers waiting on q.Block
close(q.done)
return nil
}
} | go | {
"resource": ""
} |
q15228 | Cancel | train | func (q *Queue) Cancel() error {
// Cancel marks the queue cancelled (workers drop pending commands instead
// of fetching them) and then performs a normal Close. Idempotent.
select {
case <-q.cancelled:
// already cancelled, no-op
return nil
default:
// mark the queue as cancelled
close(q.cancelled)
// Close the Queue, that will wait for pending commands to drain
// will unblock any callers waiting on q.Block
return q.Close()
}
} | go | {
"resource": ""
} |
q15229 | Send | train | func (q *Queue) Send(c Command) error {
// Send enqueues c for fetching. It rejects nil commands and commands
// whose URL has no host, and refuses to enqueue on a closed queue.
if c == nil {
return ErrEmptyHost
}
if u := c.URL(); u == nil || u.Host == "" {
return ErrEmptyHost
}
select {
case <-q.closed:
return ErrQueueClosed
default:
// May block until processQueue picks up the previous command.
q.ch <- c
}
return nil
} | go | {
"resource": ""
} |
q15230 | SendString | train | func (q *Queue) SendString(method string, rawurl ...string) (int, error) {
// SendString enqueues the raw URLs with the given HTTP method, returning
// how many were enqueued before any error.
return q.sendWithMethod(method, rawurl)
} | go | {
"resource": ""
} |
q15231 | SendStringHead | train | func (q *Queue) SendStringHead(rawurl ...string) (int, error) {
// SendStringHead enqueues the raw URLs as HEAD requests.
return q.sendWithMethod("HEAD", rawurl)
} | go | {
"resource": ""
} |
q15232 | SendStringGet | train | func (q *Queue) SendStringGet(rawurl ...string) (int, error) {
// SendStringGet enqueues the raw URLs as GET requests.
return q.sendWithMethod("GET", rawurl)
} | go | {
"resource": ""
} |
q15233 | Start | train | func (f *Fetcher) Start() *Queue {
// Start initializes the per-host channel map and the Queue's signal
// channels, then launches the single queue-processing goroutine. The
// returned Queue is the caller's handle for sending commands.
f.hosts = make(map[string]chan Command)
f.q = &Queue{
ch: make(chan Command, 1),
closed: make(chan struct{}),
cancelled: make(chan struct{}),
done: make(chan struct{}),
}
// Start the one and only queue processing goroutine.
f.q.wg.Add(1)
go f.processQueue()
return f.q
} | go | {
"resource": ""
} |
q15234 | Debug | train | func (f *Fetcher) Debug() <-chan *DebugInfo {
// Debug enables debug reporting and returns the channel on which
// DebugInfo values are emitted (non-blocking sends; values may be
// dropped when the buffer is full).
f.dbgmu.Lock()
defer f.dbgmu.Unlock()
f.debugging = true
return f.dbg
} | go | {
"resource": ""
} |
q15235 | processQueue | train | func (f *Fetcher) processQueue() {
// processQueue is the single dispatcher goroutine: it receives commands
// from the Queue, lazily creates one infinite-queue + worker goroutine
// pair per host, and routes each command to its host's channel. A nil
// command is the Close sentinel (Close closes q.closed before sending
// nil, so the select below always observes the close signal first and
// v.URL() is never reached on a nil command).
loop:
for v := range f.q.ch {
if v == nil {
// Special case, when the Queue is closed, a nil command is sent, use this
// indicator to check for the closed signal, instead of looking on every loop.
select {
case <-f.q.closed:
// Close signal, exit loop
break loop
default:
// Keep going
}
}
select {
case <-f.q.cancelled:
// queue got cancelled, drain
continue
default:
// go on
}
// Get the URL to enqueue
u := v.URL()
// Check if a channel is already started for this host
f.mu.Lock()
in, ok := f.hosts[u.Host]
if !ok {
// Start a new channel and goroutine for this host.
var rob *url.URL
if !f.DisablePoliteness {
// Must send the robots.txt request.
rob = u.ResolveReference(robotsTxtParsedPath)
}
// Create the infinite queue: the in channel to send on, and the out channel
// to read from in the host's goroutine, and add to the hosts map
var out chan Command
in, out = make(chan Command, 1), make(chan Command, 1)
f.hosts[u.Host] = in
f.mu.Unlock()
f.q.wg.Add(1)
// Start the infinite queue goroutine for this host
go sliceIQ(in, out)
// Start the working goroutine for this host
go f.processChan(out, u.Host)
if !f.DisablePoliteness {
// Enqueue the robots.txt request first.
in <- robotCommand{&Cmd{U: rob, M: "GET"}}
}
} else {
f.mu.Unlock()
}
// Send the request
in <- v
// Send debug info, but do not block if full
f.dbgmu.Lock()
if f.debugging {
f.mu.Lock()
select {
case f.dbg <- &DebugInfo{len(f.hosts)}:
default:
}
f.mu.Unlock()
}
f.dbgmu.Unlock()
}
// Close all host channels now that it is impossible to send on those. Those are the `in`
// channels of the infinite queue. It will then drain any pending events, triggering the
// handlers for each in the worker goro, and then the infinite queue goro will terminate
// and close the `out` channel, which in turn will terminate the worker goro.
f.mu.Lock()
for _, ch := range f.hosts {
close(ch)
}
f.hosts = make(map[string]chan Command)
f.mu.Unlock()
f.q.wg.Done()
} | go | {
"resource": ""
} |
q15236 | processChan | train | func (f *Fetcher) processChan(ch <-chan Command, hostKey string) {
// processChan is the per-host worker: it honors robots.txt (the first
// command for a host is the robotCommand), enforces the crawl delay
// between requests, and retires itself after WorkerIdleTTL of inactivity.
var (
agent *robotstxt.Group
wait <-chan time.Time
ttl <-chan time.Time
delay = f.CrawlDelay
)
loop:
for {
select {
case <-f.q.cancelled:
break loop
case v, ok := <-ch:
if !ok {
// Terminate this goroutine, channel is closed
break loop
}
// Wait for the prescribed delay
if wait != nil {
<-wait
}
// was it cancelled during the wait? check again
select {
case <-f.q.cancelled:
break loop
default:
// go on
}
switch r, ok := v.(robotCommand); {
case ok:
// This is the robots.txt request
agent = f.getRobotAgent(r)
// Initialize the crawl delay
if agent != nil && agent.CrawlDelay > 0 {
delay = agent.CrawlDelay
}
wait = time.After(delay)
case agent == nil || agent.Test(v.URL().Path):
// Path allowed, process the request
res, err := f.doRequest(v)
f.visit(v, res, err)
// No delay on error - the remote host was not reached
if err == nil {
wait = time.After(delay)
} else {
wait = nil
}
default:
// Path disallowed by robots.txt
f.visit(v, nil, ErrDisallowed)
wait = nil
}
// Every time a command is received, reset the ttl channel
ttl = time.After(f.WorkerIdleTTL)
case <-ttl:
// Worker has been idle for WorkerIdleTTL, terminate it
f.mu.Lock()
inch, ok := f.hosts[hostKey]
delete(f.hosts, hostKey)
// Close the queue if AutoClose is set and there are no more hosts.
if f.AutoClose && len(f.hosts) == 0 {
go f.q.Close()
}
f.mu.Unlock()
if ok {
close(inch)
}
break loop
}
}
// need to drain ch until it is closed, to prevent the producer goroutine
// from leaking.
// NOTE(review): `for range ch {}` is the modern spelling of this drain loop.
for _ = range ch {
}
f.q.wg.Done()
} | go | {
"resource": ""
} |
q15237 | getRobotAgent | train | func (f *Fetcher) getRobotAgent(r robotCommand) *robotstxt.Group {
// getRobotAgent fetches and parses robots.txt for the command's host and
// returns the group matching the fetcher's user agent. Fetch or parse
// failures are logged to stderr and yield nil (treated as "allow all").
res, err := f.doRequest(r)
if err != nil {
// TODO: Ignore robots.txt request error?
fmt.Fprintf(os.Stderr, "fetchbot: error fetching robots.txt: %s\n", err)
return nil
}
if res.Body != nil {
defer res.Body.Close()
}
robData, err := robotstxt.FromResponse(res)
if err != nil {
// TODO : Ignore robots.txt parse error?
fmt.Fprintf(os.Stderr, "fetchbot: error parsing robots.txt: %s\n", err)
return nil
}
return robData.FindGroup(f.UserAgent)
} | go | {
"resource": ""
} |
q15238 | visit | train | func (f *Fetcher) visit(cmd Command, res *http.Response, err error) {
// visit dispatches a fetched response: commands that carry their own
// Handler are invoked directly, otherwise the Fetcher's Handler runs.
// The response body is closed here after the handler returns.
if res != nil && res.Body != nil {
defer res.Body.Close()
}
// if the Command implements Handler, call that handler, otherwise
// dispatch to the Fetcher's Handler.
if h, ok := cmd.(Handler); ok {
h.Handle(&Context{Cmd: cmd, Q: f.q}, res, err)
return
}
f.Handler.Handle(&Context{Cmd: cmd, Q: f.q}, res, err)
} | go | {
"resource": ""
} |
q15239 | doRequest | train | func (f *Fetcher) doRequest(cmd Command) (*http.Response, error) {
// doRequest builds and executes the HTTP request for cmd, honoring the
// optional provider interfaces the command may implement (headers, basic
// auth, cookies, request body) and defaulting the User-Agent.
req, err := http.NewRequest(cmd.Method(), cmd.URL().String(), nil)
if err != nil {
return nil, err
}
// If the Command implements some other recognized interfaces, set
// the request accordingly (see cmd.go for the list of interfaces).
// First, the Header values.
if hd, ok := cmd.(HeaderProvider); ok {
for k, v := range hd.Header() {
req.Header[k] = v
}
}
// BasicAuth has higher priority than an Authorization header set by
// a HeaderProvider.
if ba, ok := cmd.(BasicAuthProvider); ok {
req.SetBasicAuth(ba.BasicAuth())
}
// Cookies are added to the request, even if some cookies were set
// by a HeaderProvider.
if ck, ok := cmd.(CookiesProvider); ok {
for _, c := range ck.Cookies() {
req.AddCookie(c)
}
}
// For the body of the request, ReaderProvider has higher priority
// than ValuesProvider.
if rd, ok := cmd.(ReaderProvider); ok {
rdr := rd.Reader()
rc, ok := rdr.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(rdr)
}
req.Body = rc
} else if val, ok := cmd.(ValuesProvider); ok {
v := val.Values()
req.Body = ioutil.NopCloser(strings.NewReader(v.Encode()))
// Form bodies get the form content type unless one was already set.
if req.Header.Get("Content-Type") == "" {
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
}
// If there was no User-Agent implicitly set by the HeaderProvider,
// set it to the default value.
if req.Header.Get("User-Agent") == "" {
req.Header.Set("User-Agent", f.UserAgent)
}
// Do the request.
res, err := f.HttpClient.Do(req)
if err != nil {
return nil, err
}
return res, nil
} | go | {
"resource": ""
} |
q15240 | sliceIQ | train | func sliceIQ(in <-chan Command, next chan<- Command) {
// sliceIQ is an "infinite queue": it buffers an unbounded number of
// commands in a slice between the in and next channels, so senders on in
// never block on a slow consumer. When in closes, buffered commands are
// flushed and next is closed.
defer close(next)
// pending events (this is the "infinite" part)
pending := []Command{}
recv:
for {
// Ensure that pending always has values so the select can
// multiplex between the receiver and sender properly
if len(pending) == 0 {
v, ok := <-in
if !ok {
// in is closed, flush values
break
}
// We now have something to send
pending = append(pending, v)
}
select {
// Queue incoming values
case v, ok := <-in:
if !ok {
// in is closed, flush values
break recv
}
pending = append(pending, v)
// Send queued values
case next <- pending[0]:
// Nil out the sent slot so the GC can reclaim the Command.
pending[0] = nil
pending = pending[1:]
}
}
// After in is closed, we may still have events to send
for _, v := range pending {
next <- v
}
} | go | {
"resource": ""
} |
q15241 | NewPQueue | train | func NewPQueue(pqType PQType) *PQueue {
// NewPQueue returns an empty binary-heap priority queue, max-oriented or
// min-oriented depending on pqType.
var cmp func(int, int) bool
if pqType == MAXPQ {
cmp = max
} else {
cmp = min
}
items := make([]*item, 1)
items[0] = nil // Heap queue first element should always be nil
return &PQueue{
items: items,
elemsCount: 0,
comparator: cmp,
}
} | go | {
"resource": ""
} |
q15242 | Push | train | func (pq *PQueue) Push(value interface{}, priority int) {
// Push inserts value with the given priority, restoring the heap
// invariant by sifting the new item up from the last position.
item := newItem(value, priority)
pq.Lock()
pq.items = append(pq.items, item)
pq.elemsCount += 1
pq.swim(pq.size())
pq.Unlock()
} | go | {
"resource": ""
} |
q15243 | Size | train | func (pq *PQueue) Size() int {
// Size returns the number of items currently in the queue.
pq.RLock()
defer pq.RUnlock()
return pq.size()
} | go | {
"resource": ""
} |
q15244 | Empty | train | func (pq *PQueue) Empty() bool {
// Empty reports whether the queue holds no items.
pq.RLock()
defer pq.RUnlock()
return pq.size() == 0
} | go | {
"resource": ""
} |
q15245 | NewCappedDeque | train | func NewCappedDeque(capacity int) *Deque {
// NewCappedDeque returns a deque limited to the given capacity (see Full
// for how the cap is enforced).
return &Deque{
container: list.New(),
capacity: capacity,
}
} | go | {
"resource": ""
} |
q15246 | Size | train | func (s *Deque) Size() int {
// Size returns the number of elements currently in the deque.
s.RLock()
defer s.RUnlock()
return s.container.Len()
} | go | {
"resource": ""
} |
q15247 | Capacity | train | func (s *Deque) Capacity() int {
// Capacity returns the configured maximum size.
s.RLock()
defer s.RUnlock()
return s.capacity
} | go | {
"resource": ""
} |
q15248 | Empty | train | func (s *Deque) Empty() bool {
// Empty reports whether the deque holds no elements.
s.RLock()
defer s.RUnlock()
return s.container.Len() == 0
} | go | {
"resource": ""
} |
q15249 | Full | train | func (s *Deque) Full() bool {
// Full reports whether the deque has reached its capacity. A negative
// capacity means unbounded, so Full is then always false.
s.RLock()
defer s.RUnlock()
return s.capacity >= 0 && s.container.Len() >= s.capacity
} | go | {
"resource": ""
} |
q15250 | AddSchedulerTaskFromTask | train | func AddSchedulerTaskFromTask(t core.Task) Task {
// AddSchedulerTaskFromTask converts a core.Task into a scheduler Task,
// copying over its schedule and workflow map.
st := SchedulerTaskFromTask(t)
(&st).assertSchedule(t.Schedule())
st.Workflow = t.WMap()
return st
} | go | {
"resource": ""
} |
q15251 | SecurityTLSEnabled | train | func SecurityTLSEnabled(certPath, keyPath string, secureSide SecureSide) GRPCSecurity {
// SecurityTLSEnabled returns a TLS-enabled security config using the
// given certificate/key pair for the specified side of the connection.
return GRPCSecurity{
TLSEnabled: true,
SecureSide: secureSide,
TLSCertPath: certPath,
TLSKeyPath: keyPath,
}
} | go | {
"resource": ""
} |
q15252 | SecurityTLSExtended | train | func SecurityTLSExtended(certPath, keyPath string, secureSide SecureSide, caCertPaths []string) GRPCSecurity {
// SecurityTLSExtended is SecurityTLSEnabled plus a list of CA certificate
// paths used to verify the peer.
return GRPCSecurity{
TLSEnabled: true,
SecureSide: secureSide,
TLSCertPath: certPath,
TLSKeyPath: keyPath,
CACertPaths: caCertPaths,
}
} | go | {
"resource": ""
} |
q15253 | NewCollectorGrpcClient | train | func NewCollectorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginCollectorClient, error) {
// NewCollectorGrpcClient returns a gRPC client for a collector plugin at
// the given address.
ctx := context.Background()
p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.CollectorPluginType)
if err != nil {
return nil, err
}
// err is necessarily nil here; return an explicit nil for clarity and
// for consistency with NewStreamCollectorGrpcClient.
return p.(PluginCollectorClient), nil
} | go | {
"resource": ""
} |
q15254 | NewStreamCollectorGrpcClient | train | func NewStreamCollectorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginStreamCollectorClient, error) {
// NewStreamCollectorGrpcClient returns a gRPC client for a streaming
// collector plugin at the given address.
ctx := context.Background()
p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.StreamCollectorPluginType)
if err != nil {
return nil, err
}
return p.(PluginStreamCollectorClient), nil
} | go | {
"resource": ""
} |
q15255 | NewProcessorGrpcClient | train | func NewProcessorGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginProcessorClient, error) {
// NewProcessorGrpcClient returns a gRPC client for a processor plugin at
// the given address.
ctx := context.Background()
p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.ProcessorPluginType)
if err != nil {
return nil, err
}
// err is necessarily nil here; return an explicit nil for clarity and
// for consistency with NewStreamCollectorGrpcClient.
return p.(PluginProcessorClient), nil
} | go | {
"resource": ""
} |
q15256 | NewPublisherGrpcClient | train | func NewPublisherGrpcClient(address string, timeout time.Duration, security GRPCSecurity) (PluginPublisherClient, error) {
// NewPublisherGrpcClient returns a gRPC client for a publisher plugin at
// the given address.
ctx := context.Background()
p, err := newPluginGrpcClient(ctx, address, timeout, security, plugin.PublisherPluginType)
if err != nil {
return nil, err
}
// err is necessarily nil here; return an explicit nil for clarity and
// for consistency with NewStreamCollectorGrpcClient.
return p.(PluginPublisherClient), nil
} | go | {
"resource": ""
} |
q15257 | newPluginGrpcClient | train | func newPluginGrpcClient(ctx context.Context, address string, timeout time.Duration, security GRPCSecurity, typ plugin.PluginType) (interface{}, error) {
// newPluginGrpcClient is the shared constructor behind the typed
// New*GrpcClient helpers: it parses host/port, builds transport
// credentials from the security config, and dials the plugin.
address, port, err := parseAddress(address)
if err != nil {
return nil, err
}
var p *grpcClient
var creds credentials.TransportCredentials
if creds, err = buildCredentials(security); err != nil {
return nil, err
}
p, err = newGrpcClient(ctx, address, int(port), timeout, typ, creds)
if err != nil {
return nil, err
}
return p, nil
} | go | {
"resource": ""
} |
q15258 | GetConfigPolicy | train | func (f *Mock) GetConfigPolicy() (*cpolicy.ConfigPolicy, error) {
// GetConfigPolicy returns the mock plugin's config policy: an optional
// "name" (default "bob") and a required "password" under the
// intel/mock/foo namespace.
c := cpolicy.New()
rule, _ := cpolicy.NewStringRule("name", false, "bob")
rule2, _ := cpolicy.NewStringRule("password", true)
p := cpolicy.NewPolicyNode()
p.Add(rule)
p.Add(rule2)
c.Add([]string{"intel", "mock", "foo"}, p)
return c, nil
} | go | {
"resource": ""
} |
q15259 | beforeAction | train | func beforeAction(ctx *cli.Context) error {
// beforeAction runs before every CLI command: it creates the shared API
// client (assigned to the package-level pClient/err), applies any
// credentials, and verifies tribe mode when a tribe command was invoked.
username, password := checkForAuth(ctx)
pClient, err = client.New(ctx.String("url"), ctx.String("api-version"), ctx.Bool("insecure"), client.Timeout(ctx.Duration("timeout")))
if err != nil {
return fmt.Errorf("%v", err)
}
pClient.Password = password
pClient.Username = username
if err = checkTribeCommand(ctx); err != nil {
return fmt.Errorf("%v", err)
}
return nil
} | go | {
"resource": ""
} |
q15260 | checkTribeCommand | train | func checkTribeCommand(ctx *cli.Context) error {
// checkTribeCommand returns an error when a tribe subcommand was invoked
// but the daemon does not have tribe mode enabled (detected by probing
// the agreements endpoint). Non-tribe commands pass through unchecked.
tribe := false
// Detect a tribe command by scanning the raw CLI arguments.
for _, a := range os.Args {
for _, command := range tribeCommands {
if strings.Contains(a, command.Name) {
tribe = true
break
}
}
if tribe {
break
}
}
if !tribe {
return nil
}
resp := pClient.ListAgreements()
if resp.Err != nil {
// Auth failures are reported as-is; anything else means tribe mode
// is not enabled on the daemon.
if resp.Err.Error() == "Invalid credentials" {
return resp.Err
}
return fmt.Errorf("Tribe mode must be enabled in snapteld to use tribe command")
}
return nil
} | go | {
"resource": ""
} |
q15261 | Select | train | func (l *lru) Select(aps []AvailablePlugin, _ string) (AvailablePlugin, error) {
// Select returns the least-recently-used plugin from aps (the one with
// the earliest LastHit). Returns ErrCouldNotSelect when aps is empty.
t := time.Now()
index := -1
for i, ap := range aps {
// look for the least recently used
if ap.LastHit().Before(t) || index == -1 {
index = i
t = ap.LastHit()
}
}
if index > -1 {
l.logger.WithFields(log.Fields{
"block": "select",
"strategy": l.String(),
"pool size": len(aps),
"index": aps[index].String(),
"hitcount": aps[index].HitCount(),
}).Debug("plugin selected")
return aps[index], nil
}
l.logger.WithFields(log.Fields{
"block": "select",
"strategy": l.String(),
"error": ErrCouldNotSelect,
}).Error("error selecting")
return nil, ErrCouldNotSelect
} | go | {
"resource": ""
} |
q15262 | Remove | train | func (l *lru) Remove(aps []AvailablePlugin, taskID string) (AvailablePlugin, error) {
// Remove picks the plugin to evict using the same LRU policy as Select.
ap, err := l.Select(aps, taskID)
if err != nil {
return nil, err
}
return ap, nil
} | go | {
"resource": ""
} |
q15263 | ListMembers | train | func (c *Client) ListMembers() *ListMembersResult {
// ListMembers retrieves the tribe member list from the REST API, mapping
// the response body by its declared meta type.
resp, err := c.do("GET", "/tribe/members", ContentTypeJSON, nil)
if err != nil {
return &ListMembersResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeMemberListType:
// Success
return &ListMembersResult{resp.Body.(*rbody.TribeMemberList), nil}
case rbody.ErrorType:
return &ListMembersResult{Err: resp.Body.(*rbody.Error)}
default:
return &ListMembersResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15264 | GetMember | train | func (c *Client) GetMember(name string) *GetMemberResult {
// GetMember retrieves a single tribe member by name.
resp, err := c.do("GET", fmt.Sprintf("/tribe/member/%s", name), ContentTypeJSON, nil)
if err != nil {
return &GetMemberResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeMemberShowType:
// Success
return &GetMemberResult{resp.Body.(*rbody.TribeMemberShow), nil}
case rbody.ErrorType:
return &GetMemberResult{Err: resp.Body.(*rbody.Error)}
default:
return &GetMemberResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15265 | ListAgreements | train | func (c *Client) ListAgreements() *ListAgreementResult {
// ListAgreements retrieves all tribe agreements from the REST API.
resp, err := c.do("GET", "/tribe/agreements", ContentTypeJSON, nil)
if err != nil {
return &ListAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeListAgreementType:
return &ListAgreementResult{resp.Body.(*rbody.TribeListAgreement), nil}
case rbody.ErrorType:
return &ListAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &ListAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15266 | AddAgreement | train | func (c *Client) AddAgreement(name string) *AddAgreementResult {
// AddAgreement creates a new tribe agreement with the given name.
b, err := json.Marshal(struct {
Name string `json:"name"`
}{Name: name})
if err != nil {
return &AddAgreementResult{Err: err}
}
resp, err := c.do("POST", "/tribe/agreements", ContentTypeJSON, b)
if err != nil {
return &AddAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeAddAgreementType:
return &AddAgreementResult{resp.Body.(*rbody.TribeAddAgreement), nil}
case rbody.ErrorType:
return &AddAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &AddAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15267 | DeleteAgreement | train | func (c *Client) DeleteAgreement(name string) *DeleteAgreementResult {
// DeleteAgreement removes the named tribe agreement.
resp, err := c.do("DELETE", fmt.Sprintf("/tribe/agreements/%s", name), ContentTypeJSON, nil)
if err != nil {
return &DeleteAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeDeleteAgreementType:
return &DeleteAgreementResult{resp.Body.(*rbody.TribeDeleteAgreement), nil}
case rbody.ErrorType:
return &DeleteAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &DeleteAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15268 | GetAgreement | train | func (c *Client) GetAgreement(name string) *GetAgreementResult {
// GetAgreement retrieves the named tribe agreement.
resp, err := c.do("GET", fmt.Sprintf("/tribe/agreements/%s", name), ContentTypeJSON, nil)
if err != nil {
return &GetAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeGetAgreementType:
return &GetAgreementResult{resp.Body.(*rbody.TribeGetAgreement), nil}
case rbody.ErrorType:
return &GetAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &GetAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15269 | JoinAgreement | train | func (c *Client) JoinAgreement(agreementName, memberName string) *JoinAgreementResult {
// JoinAgreement adds the named member to the named agreement.
b, err := json.Marshal(struct {
MemberName string `json:"member_name"`
}{MemberName: memberName})
if err != nil {
return &JoinAgreementResult{Err: err}
}
resp, err := c.do("PUT", fmt.Sprintf("/tribe/agreements/%s/join", agreementName), ContentTypeJSON, b)
if err != nil {
return &JoinAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeJoinAgreementType:
return &JoinAgreementResult{resp.Body.(*rbody.TribeJoinAgreement), nil}
case rbody.ErrorType:
return &JoinAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &JoinAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15270 | LeaveAgreement | train | func (c *Client) LeaveAgreement(agreementName, memberName string) *LeaveAgreementResult {
// LeaveAgreement removes the named member from the named agreement.
b, err := json.Marshal(struct {
MemberName string `json:"member_name"`
}{MemberName: memberName})
if err != nil {
return &LeaveAgreementResult{Err: err}
}
resp, err := c.do("DELETE", fmt.Sprintf("/tribe/agreements/%s/leave", agreementName), ContentTypeJSON, b)
if err != nil {
return &LeaveAgreementResult{Err: err}
}
switch resp.Meta.Type {
case rbody.TribeLeaveAgreementType:
return &LeaveAgreementResult{resp.Body.(*rbody.TribeLeaveAgreement), nil}
case rbody.ErrorType:
return &LeaveAgreementResult{Err: resp.Body.(*rbody.Error)}
default:
return &LeaveAgreementResult{Err: ErrAPIResponseMetaType}
}
} | go | {
"resource": ""
} |
q15271 | ToNamespace | train | func ToNamespace(n core.Namespace) []*NamespaceElement {
// ToNamespace converts a core.Namespace into its protobuf representation,
// copying value, description, and name for each element.
elements := make([]*NamespaceElement, 0, len(n))
for _, value := range n {
ne := &NamespaceElement{
Value: value.Value,
Description: value.Description,
Name: value.Name,
}
elements = append(elements, ne)
}
return elements
} | go | {
"resource": ""
} |
q15272 | ToCoreMetric | train | func ToCoreMetric(mt *Metric) core.Metric {
// ToCoreMetric converts a protobuf Metric back into a core.Metric,
// unwrapping the oneof data field into the concrete Go value.
var lastAdvertisedTime time.Time
// if the lastAdvertisedTime is not set we handle. -62135596800 represents the
// number of seconds from year 1 to 1970 (the Unix seconds of Go's zero
// time.Time) and is the sentinel for "unset"; substitute the current time.
if mt.LastAdvertisedTime.Sec == int64(-62135596800) {
lastAdvertisedTime = time.Unix(time.Now().Unix(), int64(time.Now().Nanosecond()))
} else {
lastAdvertisedTime = time.Unix(mt.LastAdvertisedTime.Sec, mt.LastAdvertisedTime.Nsec)
}
ret := &metric{
namespace: ToCoreNamespace(mt.Namespace),
version: int(mt.Version),
tags: mt.Tags,
timeStamp: time.Unix(mt.Timestamp.Sec, mt.Timestamp.Nsec),
lastAdvertisedTime: lastAdvertisedTime,
config: ConfigMapToConfig(mt.Config),
description: mt.Description,
unit: mt.Unit,
}
// Unwrap the protobuf oneof into the native data value.
switch mt.Data.(type) {
case *Metric_BytesData:
ret.data = mt.GetBytesData()
case *Metric_StringData:
ret.data = mt.GetStringData()
case *Metric_Float32Data:
ret.data = mt.GetFloat32Data()
case *Metric_Float64Data:
ret.data = mt.GetFloat64Data()
case *Metric_Int32Data:
ret.data = mt.GetInt32Data()
case *Metric_Int64Data:
ret.data = mt.GetInt64Data()
case *Metric_Uint32Data:
ret.data = mt.GetUint32Data()
case *Metric_Uint64Data:
ret.data = mt.GetUint64Data()
case *Metric_BoolData:
ret.data = mt.GetBoolData()
}
return ret
} | go | {
"resource": ""
} |
q15273 | ToCoreNamespace | train | func ToCoreNamespace(n []*NamespaceElement) core.Namespace {
var namespace core.Namespace
for _, val := range n {
ele := core.NamespaceElement{
Value: val.Value,
Description: val.Description,
Name: val.Name,
}
namespace = append(namespace, ele)
}
return namespace
} | go | {
"resource": ""
} |
q15274 | ToSubPluginMsg | train | func ToSubPluginMsg(pl core.SubscribedPlugin) *SubscribedPlugin {
return &SubscribedPlugin{
TypeName: pl.TypeName(),
Name: pl.Name(),
Version: int64(pl.Version()),
Config: ConfigToConfigMap(pl.Config()),
}
} | go | {
"resource": ""
} |
q15275 | ToSubPlugin | train | func ToSubPlugin(msg *SubscribedPlugin) core.SubscribedPlugin {
return SubPlugin{
typeName: msg.TypeName,
name: msg.Name,
version: int(msg.Version),
config: ConfigMapToConfig(msg.Config),
}
} | go | {
"resource": ""
} |
q15276 | ToCorePluginMsg | train | func ToCorePluginMsg(pl core.Plugin) *Plugin {
return &Plugin{
TypeName: pl.TypeName(),
Name: pl.Name(),
Version: int64(pl.Version()),
}
} | go | {
"resource": ""
} |
q15277 | ToCorePluginsMsg | train | func ToCorePluginsMsg(pls []core.Plugin) []*Plugin {
plugins := make([]*Plugin, len(pls))
for i, v := range pls {
plugins[i] = ToCorePluginMsg(v)
}
return plugins
} | go | {
"resource": ""
} |
q15278 | MsgToCorePlugin | train | func MsgToCorePlugin(msg *Plugin) core.Plugin {
pl := &SubPlugin{
typeName: msg.TypeName,
name: msg.Name,
version: int(msg.Version),
}
return core.Plugin(pl)
} | go | {
"resource": ""
} |
q15279 | MsgToCorePlugins | train | func MsgToCorePlugins(msg []*Plugin) []core.Plugin {
plugins := make([]core.Plugin, len(msg))
for i, v := range msg {
plugins[i] = MsgToCorePlugin(v)
}
return plugins
} | go | {
"resource": ""
} |
q15280 | ToSubPlugins | train | func ToSubPlugins(msg []*SubscribedPlugin) []core.SubscribedPlugin {
plugins := make([]core.SubscribedPlugin, len(msg))
for i, v := range msg {
plugins[i] = ToSubPlugin(v)
}
return plugins
} | go | {
"resource": ""
} |
q15281 | ToSubPluginsMsg | train | func ToSubPluginsMsg(sp []core.SubscribedPlugin) []*SubscribedPlugin {
plugins := make([]*SubscribedPlugin, len(sp))
for i, v := range sp {
plugins[i] = ToSubPluginMsg(v)
}
return plugins
} | go | {
"resource": ""
} |
q15282 | ConfigMapToConfig | train | func ConfigMapToConfig(cfg *ConfigMap) *cdata.ConfigDataNode {
if cfg == nil {
return nil
}
config := cdata.FromTable(ParseConfig(cfg))
return config
} | go | {
"resource": ""
} |
q15283 | ConfigToConfigMap | train | func ConfigToConfigMap(cd *cdata.ConfigDataNode) *ConfigMap {
if cd == nil {
return nil
}
return ToConfigMap(cd.Table())
} | go | {
"resource": ""
} |
q15284 | ConvertSnapErrors | train | func ConvertSnapErrors(s []*SnapError) []serror.SnapError {
rerrs := make([]serror.SnapError, len(s))
for i, err := range s {
rerrs[i] = serror.New(errors.New(err.ErrorString), GetFields(err))
}
return rerrs
} | go | {
"resource": ""
} |
q15285 | ToSnapError | train | func ToSnapError(e *SnapError) serror.SnapError {
if e == nil {
return nil
}
return serror.New(errors.New(e.ErrorString), GetFields(e))
} | go | {
"resource": ""
} |
q15286 | GetFields | train | func GetFields(s *SnapError) map[string]interface{} {
fields := make(map[string]interface{}, len(s.ErrorFields))
for key, value := range s.ErrorFields {
fields[key] = value
}
return fields
} | go | {
"resource": ""
} |
q15287 | Get | train | func (m *managers) Get(key string) (managesMetrics, error) {
if key == "" {
return m.local, nil
}
m.mutex.RLock()
defer m.mutex.RUnlock()
if val, ok := m.remoteManagers[key]; ok {
return val, nil
} else {
return nil, errors.New(fmt.Sprintf("Client not found for: %v", key))
}
} | go | {
"resource": ""
} |
q15288 | wmapToWorkflow | train | func wmapToWorkflow(wfMap *wmap.WorkflowMap) (*schedulerWorkflow, error) {
wf := &schedulerWorkflow{}
err := convertCollectionNode(wfMap.Collect, wf)
if err != nil {
return nil, err
}
// ***
// TODO validate workflow makes sense here
// - flows that don't end in publishers?
// - duplicate child nodes anywhere?
//***
// Retain a copy of the original workflow map
wf.workflowMap = wfMap
return wf, nil
} | go | {
"resource": ""
} |
// Start runs one iteration of the workflow for task t: it dispatches the
// collect job, blocks until it has run or been skipped, and then either
// records the failure and emits a MetricCollectionFailedEvent, or emits a
// MetricCollectedEvent and fans out the process/publish nodes with the
// collected metrics.
func (s *schedulerWorkflow) Start(t *task) {
	workflowLogger.WithFields(log.Fields{
		"_block":    "workflow-start",
		"task-id":   t.id,
		"task-name": t.name,
	}).Debug("Starting workflow")
	s.state = WorkflowStarted
	j := newCollectorJob(s.metrics, t.deadlineDuration, t.metricsManager, t.workflow.configTree, t.id, s.tags)
	// dispatch 'collect' job to be worked
	// Block until the job has been either run or skipped.
	errors := t.manager.Work(j).Promise().Await()
	if len(errors) > 0 {
		// Collection failed: record the failure on the task and emit a
		// failure event (deferred, so it fires as this function returns).
		t.RecordFailure(errors)
		event := new(scheduler_event.MetricCollectionFailedEvent)
		event.TaskID = t.id
		event.Errors = errors
		defer s.eventEmitter.Emit(event)
		return
	}
	// Send event (deferred: the emit runs only after the downstream work
	// below has been dispatched).
	event := new(scheduler_event.MetricCollectedEvent)
	event.TaskID = t.id
	event.Metrics = j.(*collectorJob).metrics
	defer s.eventEmitter.Emit(event)
	// walk through the tree and dispatch work
	workJobs(s.processNodes, s.publishNodes, t, j)
}
// workJobs submits the process nodes (prs) and publish nodes (pus) of a
// workflow as jobs derived from the parent job pj for task t. Each node is
// submitted from its own goroutine; the function blocks until every
// submission goroutine has finished (which, per the submit helpers, is when
// the submissions complete — not a statement about the jobs' own execution).
func workJobs(prs []*processNode, pus []*publishNode, t *task, pj job) {
	// optimize for no jobs
	if len(prs) == 0 && len(pus) == 0 {
		return
	}
	// Create waitgroup to block until all jobs are submitted
	wg := &sync.WaitGroup{}
	workflowLogger.WithFields(log.Fields{
		"_block":              "work-jobs",
		"task-id":             t.id,
		"task-name":           t.name,
		"count-process-nodes": len(prs),
		"count-publish-nodes": len(pus),
		"parent-node-type":    pj.TypeString(),
	}).Debug("Batch submission of process and publish nodes")
	// range over the process jobs and call submitProcessJob
	for _, pr := range prs {
		// increment the wait group (before starting goroutine to prevent a race condition)
		wg.Add(1)
		// Start goroutine to submit the process job
		go submitProcessJob(pj, t, wg, pr)
	}
	// range over the publish jobs and call submitPublishJob
	for _, pu := range pus {
		// increment the wait group (before starting goroutine to prevent a race condition)
		wg.Add(1)
		// Start goroutine to submit the publish job
		go submitPublishJob(pj, t, wg, pu)
	}
	// Wait until all job submission goroutines are done
	wg.Wait()
	workflowLogger.WithFields(log.Fields{
		"_block":              "work-jobs",
		"task-id":             t.id,
		"task-name":           t.name,
		"count-process-nodes": len(prs),
		"count-publish-nodes": len(pus),
		"parent-node-type":    pj.TypeString(),
	}).Debug("Batch submission complete")
}
// GetClientConnection returns a gRPC client connection to addr:port with no
// transport credentials, delegating to GetClientConnectionWithCreds with a
// nil credentials value.
func GetClientConnection(ctx context.Context, addr string, port int) (*grpc.ClientConn, error) {
	return GetClientConnectionWithCreds(ctx, addr, port, nil)
}
q15292 | IsTLSEnabled | train | func (p *Config) IsTLSEnabled() bool {
if p.TLSCertPath != "" && p.TLSKeyPath != "" {
return true
}
return false
} | go | {
"resource": ""
} |
q15293 | WritePEMFile | train | func (u CertTestUtil) WritePEMFile(fn string, pemHeader string, b []byte) error {
f, err := os.Create(fn)
if err != nil {
return err
}
defer f.Close()
w := bufio.NewWriter(f)
pem.Encode(w, &pem.Block{
Type: pemHeader,
Bytes: b,
})
w.Flush()
return nil
} | go | {
"resource": ""
} |
// MakeCACertKeyPair generates a self-signed CA certificate and its RSA
// private key for test use. caName becomes the certificate's common name and
// the certificate is valid from now for keyValidPeriod. The returned template
// and key can be used to sign subject certificates (see MakeSubjCertKeyPair).
// NOTE(review): the ouName parameter is not used anywhere in this body —
// confirm whether it was meant to populate Subject.OrganizationalUnit.
func (u CertTestUtil) MakeCACertKeyPair(caName, ouName string, keyValidPeriod time.Duration) (caCertTpl *x509.Certificate, caCertBytes []byte, caPrivKey *rsa.PrivateKey, err error) {
	caPrivKey, err = rsa.GenerateKey(rand.Reader, keyBitsDefault)
	if err != nil {
		return nil, nil, nil, err
	}
	caPubKey := caPrivKey.Public()
	caPubBytes, err := x509.MarshalPKIXPublicKey(caPubKey)
	if err != nil {
		return nil, nil, nil, err
	}
	// SubjectKeyId is derived from the SHA-256 of the DER-encoded public key.
	caPubSha256 := sha256.Sum256(caPubBytes)
	caCertTpl = &x509.Certificate{
		SignatureAlgorithm: defaultSignatureAlgorithm,
		PublicKeyAlgorithm: defaultPublicKeyAlgorithm,
		Version:            3,
		SerialNumber:       big.NewInt(1),
		Subject: pkix.Name{
			CommonName: caName,
		},
		NotBefore: time.Now(),
		NotAfter:  time.Now().Add(keyValidPeriod),
		// CA usage only: signing certificates and CRLs.
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
		// MaxPathLenZero: this CA may sign end-entity certs but no
		// intermediate CAs.
		MaxPathLenZero: true,
		IsCA:           true,
		SubjectKeyId:   caPubSha256[:],
	}
	// Self-signed: the template is both the certificate and its issuer.
	caCertBytes, err = x509.CreateCertificate(rand.Reader, caCertTpl, caCertTpl, caPubKey, caPrivKey)
	if err != nil {
		return nil, nil, nil, err
	}
	return caCertTpl, caCertBytes, caPrivKey, nil
}
// MakeSubjCertKeyPair generates an RSA key pair and a subject certificate
// signed by the given CA template/key, for test use. The certificate carries
// common name cn and organizational unit ou, is valid from now for
// keyValidPeriod, allows both client and server auth, and is pinned to
// localhost/127.0.0.1 via its SAN entries.
func (u CertTestUtil) MakeSubjCertKeyPair(cn, ou string, keyValidPeriod time.Duration, caCertTpl *x509.Certificate, caPrivKey *rsa.PrivateKey) (subjCertBytes []byte, subjPrivKey *rsa.PrivateKey, err error) {
	subjPrivKey, err = rsa.GenerateKey(rand.Reader, keyBitsDefault)
	if err != nil {
		return nil, nil, err
	}
	subjPubBytes, err := x509.MarshalPKIXPublicKey(subjPrivKey.Public())
	if err != nil {
		return nil, nil, err
	}
	// SubjectKeyId is derived from the SHA-256 of the DER-encoded public key.
	subjPubSha256 := sha256.Sum256(subjPubBytes)
	subjCertTpl := x509.Certificate{
		SignatureAlgorithm: defaultSignatureAlgorithm,
		PublicKeyAlgorithm: defaultPublicKeyAlgorithm,
		Version:            3,
		SerialNumber:       big.NewInt(1),
		Subject: pkix.Name{
			OrganizationalUnit: []string{ou},
			CommonName:         cn,
		},
		NotBefore: time.Now(),
		NotAfter:  time.Now().Add(keyValidPeriod),
		KeyUsage:  x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement,
		// Usable as either side of a TLS connection.
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		SubjectKeyId: subjPubSha256[:],
	}
	// Restrict the cert to local test endpoints.
	subjCertTpl.DNSNames = []string{"localhost"}
	subjCertTpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}
	// Signed by the CA (issuer = caCertTpl, signing key = caPrivKey).
	subjCertBytes, err = x509.CreateCertificate(rand.Reader, &subjCertTpl, caCertTpl, subjPrivKey.Public(), caPrivKey)
	return subjCertBytes, subjPrivKey, err
}
q15296 | GetConfigTree | train | func (c *CollectWorkflowMapNode) GetConfigTree() (*cdata.ConfigDataTree, error) {
cdt := cdata.NewTree()
// Iterate over config and attempt to convert into data nodes in the tree
for ns_, cmap := range c.Config {
ns := strings.Split(ns_, "/")[1:]
cdn, err := configtoConfigDataNode(cmap, ns_)
if err != nil {
return nil, err
}
cdt.Add(ns, cdn)
}
return cdt, nil
} | go | {
"resource": ""
} |
// mergeCliOptions overlays CLI-provided options onto the task: name,
// deadline, and max-failures are copied in when present, then schedule
// handling is delegated to setScheduleFromCliOptions, whose error (if any)
// is returned. Each option is applied when the flag was explicitly set OR
// the context carries a non-empty value for it — presumably so values merged
// into the context from a task manifest are honored too; confirm with callers.
func (t *task) mergeCliOptions(ctx *cli.Context) error {
	// set the name of the task (if a 'name' was provided in the CLI options)
	name := ctx.String("name")
	if ctx.IsSet("name") || name != "" {
		t.Name = name
	}
	// set the deadline of the task (if a 'deadline' was provided in the CLI options)
	deadline := ctx.String("deadline")
	if ctx.IsSet("deadline") || deadline != "" {
		t.Deadline = deadline
	}
	// set the MaxFailures for the task (if a 'max-failures' value was provided in the CLI options)
	maxFailuresStrVal := ctx.String("max-failures")
	if ctx.IsSet("max-failures") || maxFailuresStrVal != "" {
		maxFailures, err := stringValToInt(maxFailuresStrVal)
		if err != nil {
			return err
		}
		t.MaxFailures = maxFailures
	}
	// set the schedule for the task from the CLI options (and return the results
	// of that method call, indicating whether or not an error was encountered while
	// setting up that schedule)
	return t.setScheduleFromCliOptions(ctx)
}
q15298 | validateSchema | train | func (r *schemaValidatorType) validateSchema(schema, cfg string) []serror.SnapError {
schemaLoader := gojsonschema.NewStringLoader(schema)
testDoc := gojsonschema.NewStringLoader(cfg)
result, err := gojsonschema.Validate(schemaLoader, testDoc)
var serrors []serror.SnapError
// Check for invalid json
if err != nil {
serrors = append(serrors, serror.New(err))
return serrors
}
// check if result passes validation
if result.Valid() {
return nil
}
for _, err := range result.Errors() {
serr := serror.New(errors.New("Validate schema error"))
serr.SetFields(map[string]interface{}{
"value": err.Value(),
"context": err.Context().String("::"),
"description": err.Description(),
})
serrors = append(serrors, serr)
}
return serrors
} | go | {
"resource": ""
} |
// newTask constructs a task that runs workflow wf on schedule s, using work
// manager m and metrics manager mm, and emitting events through emitter.
// A default name "Task-<uuid>" is assigned; opts (core.TaskOption values)
// are applied last and may override any default, including the name. An
// error is returned only when the workflow's remote-manager clients cannot
// be created.
func newTask(s schedule.Schedule, wf *schedulerWorkflow, m *workManager, mm managesMetrics, emitter gomit.Emitter, opts ...core.TaskOption) (*task, error) {
	//Task would always be given a default name.
	//However if a user want to change this name, she can pass optional arguments, in form of core.TaskOption
	//The new name then get over written.
	taskID := uuid.New()
	name := fmt.Sprintf("Task-%s", taskID)
	wf.eventEmitter = emitter
	// Build the remote-manager set (seeded with the local manager mm) and
	// create any clients the workflow's nodes require.
	mgrs := newManagers(mm)
	err := createTaskClients(&mgrs, wf)
	if err != nil {
		return nil, err
	}
	// A task is a streaming task iff its schedule is a StreamingSchedule.
	_, stream := s.(*schedule.StreamingSchedule)
	task := &task{
		id:               taskID,
		name:             name,
		schResponseChan:  make(chan schedule.Response),
		schedule:         s,
		state:            core.TaskStopped,
		creationTime:     time.Now(),
		workflow:         wf,
		manager:          m,
		metricsManager:   mm,
		deadlineDuration: DefaultDeadlineDuration,
		stopOnFailure:    DefaultStopOnFailure,
		eventEmitter:     emitter,
		RemoteManagers:   mgrs,
		isStream:         stream,
	}
	//set options (applied after defaults so they can override them)
	for _, opt := range opts {
		opt(task)
	}
	return task, nil
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.